-rw-r--r--.mailmap2
-rw-r--r--CREDITS24
-rw-r--r--Documentation/ABI/testing/sysfs-class-devlink4
-rw-r--r--Documentation/ABI/testing/sysfs-devices-consumer5
-rw-r--r--Documentation/ABI/testing/sysfs-devices-supplier5
-rw-r--r--Documentation/ABI/testing/sysfs-driver-ufs36
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst8
-rw-r--r--Documentation/RCU/Design/Requirements/Requirements.rst20
-rw-r--r--Documentation/admin-guide/binfmt-misc.rst4
-rw-r--r--Documentation/admin-guide/bootconfig.rst2
-rw-r--r--Documentation/admin-guide/device-mapper/dm-integrity.rst12
-rw-r--r--Documentation/admin-guide/kernel-parameters.rst4
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt16
-rw-r--r--Documentation/admin-guide/mm/concepts.rst2
-rw-r--r--Documentation/core-api/index.rst1
-rw-r--r--Documentation/dev-tools/kasan.rst27
-rw-r--r--Documentation/dev-tools/kunit/usage.rst57
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml23
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml25
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml110
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/brcm,bcm2835-dpi.yaml7
-rw-r--r--Documentation/devicetree/bindings/display/brcm,bcm2835-dsi0.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/bridge/anx6345.yaml18
-rw-r--r--Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml22
-rw-r--r--Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml14
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml70
-rw-r--r--Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml18
-rw-r--r--Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml41
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ps8640.yaml24
-rw-r--r--Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml24
-rw-r--r--Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml18
-rw-r--r--Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml7
-rw-r--r--Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml35
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml45
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml24
-rw-r--r--Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml52
-rw-r--r--Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml48
-rw-r--r--Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/connector/analog-tv-connector.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/connector/dp-connector.yaml56
-rw-r--r--Documentation/devicetree/bindings/display/connector/dvi-connector.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/connector/hdmi-connector.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/connector/vga-connector.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/ingenic,ipu.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/ingenic,lcd.yaml10
-rw-r--r--Documentation/devicetree/bindings/display/intel,keembay-display.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml21
-rw-r--r--Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-common.yaml11
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-simple.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml16
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/st,stm32-dsi.yaml12
-rw-r--r--Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml8
-rw-r--r--Documentation/devicetree/bindings/display/ste,mcde.txt104
-rw-r--r--Documentation/devicetree/bindings/display/ste,mcde.yaml168
-rw-r--r--Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml23
-rw-r--r--Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml3
-rw-r--r--Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml4
-rw-r--r--Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml4
-rw-r--r--Documentation/devicetree/bindings/dma/ti/k3-udma.yaml4
-rw-r--r--Documentation/devicetree/bindings/iio/accel/bosch,bma255.yaml4
-rw-r--r--Documentation/devicetree/bindings/net/renesas,etheravb.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/snps,dwmac.yaml8
-rw-r--r--Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml6
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt1
-rw-r--r--Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml4
-rw-r--r--Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml6
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml2
-rw-r--r--Documentation/doc-guide/sphinx.rst32
-rw-r--r--Documentation/firmware-guide/acpi/apei/einj.rst4
-rw-r--r--Documentation/gpu/drm-kms.rst52
-rw-r--r--Documentation/gpu/drm-uapi.rst3
-rw-r--r--Documentation/gpu/i915.rst2
-rw-r--r--Documentation/gpu/vkms.rst82
-rw-r--r--Documentation/hwmon/sbtsi_temp.rst2
-rw-r--r--Documentation/kbuild/makefiles.rst2
-rw-r--r--Documentation/kernel-hacking/locking.rst8
-rw-r--r--Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst62
-rw-r--r--Documentation/networking/netdev-FAQ.rst126
-rw-r--r--Documentation/networking/netdevices.rst175
-rw-r--r--Documentation/networking/packet_mmap.rst11
-rw-r--r--Documentation/networking/tls-offload.rst5
-rw-r--r--Documentation/process/4.Coding.rst6
-rw-r--r--Documentation/sound/alsa-configuration.rst2
-rw-r--r--Documentation/sound/kernel-api/writing-an-alsa-driver.rst16
-rw-r--r--Documentation/virt/kvm/api.rst9
-rw-r--r--MAINTAINERS112
-rw-r--r--Makefile2
-rw-r--r--arch/Kconfig6
-rw-r--r--arch/alpha/include/asm/local64.h1
-rw-r--r--arch/arc/Makefile20
-rw-r--r--arch/arc/boot/Makefile18
-rw-r--r--arch/arc/include/asm/Kbuild1
-rw-r--r--arch/arc/include/asm/page.h1
-rw-r--r--arch/arc/kernel/entry.S2
-rw-r--r--arch/arc/plat-hsdk/Kconfig1
-rw-r--r--arch/arm/boot/dts/omap3-n950-n9.dtsi8
-rw-r--r--arch/arm/boot/dts/omap5.dtsi6
-rw-r--r--arch/arm/boot/dts/picoxcell-pc3x2.dtsi4
-rw-r--r--arch/arm/boot/dts/ste-ux500-samsung-golden.dts1
-rw-r--r--arch/arm/configs/omap2plus_defconfig4
-rw-r--r--arch/arm/crypto/chacha-glue.c1
-rw-r--r--arch/arm/include/asm/Kbuild1
-rw-r--r--arch/arm/mach-omap2/omap_device.c8
-rw-r--r--arch/arm/mach-omap2/pmic-cpcap.c2
-rw-r--r--arch/arm/xen/enlighten.c2
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/Makefile10
-rw-r--r--arch/arm64/boot/dts/bitmain/bm1880.dtsi6
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/atomic.h10
-rw-r--r--arch/arm64/include/asm/kvm_host.h23
-rw-r--r--arch/arm64/include/asm/uaccess.h16
-rw-r--r--arch/arm64/kernel/asm-offsets.c2
-rw-r--r--arch/arm64/kernel/cpufeature.c2
-rw-r--r--arch/arm64/kernel/entry-ftrace.S12
-rw-r--r--arch/arm64/kernel/entry.S16
-rw-r--r--arch/arm64/kernel/perf_event.c41
-rw-r--r--arch/arm64/kernel/probes/kprobes.c4
-rw-r--r--arch/arm64/kernel/probes/kprobes_trampoline.S6
-rw-r--r--arch/arm64/kernel/signal.c7
-rw-r--r--arch/arm64/kernel/smp.c3
-rw-r--r--arch/arm64/kernel/syscall.c10
-rw-r--r--arch/arm64/kernel/traps.c1
-rw-r--r--arch/arm64/kernel/vdso/Makefile3
-rw-r--r--arch/arm64/kernel/vdso/vdso.lds.S5
-rw-r--r--arch/arm64/kvm/Kconfig8
-rw-r--r--arch/arm64/kvm/Makefile2
-rw-r--r--arch/arm64/kvm/arch_timer.c7
-rw-r--r--arch/arm64/kvm/arm.c32
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/adjust_pc.h9
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c12
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-smp.c6
-rw-r--r--arch/arm64/kvm/hyp/nvhe/psci-relay.c59
-rw-r--r--arch/arm64/kvm/pmu-emul.c2
-rw-r--r--arch/arm64/kvm/sys_regs.c6
-rw-r--r--arch/arm64/kvm/va_layout.c7
-rw-r--r--arch/arm64/kvm/vgic/vgic-init.c11
-rw-r--r--arch/arm64/kvm/vgic/vgic-v2.c20
-rw-r--r--arch/arm64/kvm/vgic/vgic-v3.c21
-rw-r--r--arch/arm64/mm/fault.c7
-rw-r--r--arch/arm64/mm/init.c33
-rw-r--r--arch/arm64/mm/proc.S2
-rw-r--r--arch/csky/include/asm/Kbuild1
-rw-r--r--arch/h8300/include/asm/Kbuild1
-rw-r--r--arch/hexagon/include/asm/Kbuild1
-rw-r--r--arch/ia64/include/asm/local64.h1
-rw-r--r--arch/ia64/include/asm/sparsemem.h1
-rw-r--r--arch/ia64/mm/init.c4
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/mips/boot/compressed/decompress.c3
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c2
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/highmem.h1
-rw-r--r--arch/mips/kernel/binfmt_elfn32.c7
-rw-r--r--arch/mips/kernel/binfmt_elfo32.c7
-rw-r--r--arch/mips/kernel/relocate.c10
-rw-r--r--arch/nds32/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/io.h2
-rw-r--r--arch/openrisc/mm/ioremap.c2
-rw-r--r--arch/parisc/include/asm/Kbuild1
-rw-r--r--arch/powerpc/include/asm/Kbuild1
-rw-r--r--arch/powerpc/include/asm/exception-64s.h13
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h10
-rw-r--r--arch/powerpc/include/asm/highmem.h2
-rw-r--r--arch/powerpc/include/asm/vdso/gettimeofday.h16
-rw-r--r--arch/powerpc/kernel/entry_64.S2
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S19
-rw-r--r--arch/powerpc/kernel/head_book3s_32.S9
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S17
-rw-r--r--arch/powerpc/lib/feature-fixups.c24
-rw-r--r--arch/riscv/Kconfig6
-rw-r--r--arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts2
-rw-r--r--arch/riscv/configs/defconfig2
-rw-r--r--arch/riscv/include/asm/Kbuild1
-rw-r--r--arch/riscv/include/asm/pgtable.h1
-rw-r--r--arch/riscv/include/asm/vdso.h2
-rw-r--r--arch/riscv/kernel/cacheinfo.c11
-rw-r--r--arch/riscv/kernel/entry.S24
-rw-r--r--arch/riscv/kernel/setup.c24
-rw-r--r--arch/riscv/kernel/stacktrace.c5
-rw-r--r--arch/riscv/kernel/time.c3
-rw-r--r--arch/riscv/kernel/vdso.c2
-rw-r--r--arch/riscv/mm/init.c16
-rw-r--r--arch/riscv/mm/kasan_init.c4
-rw-r--r--arch/s390/Kconfig31
-rw-r--r--arch/s390/configs/debug_defconfig12
-rw-r--r--arch/s390/configs/defconfig11
-rw-r--r--arch/s390/configs/zfcpdump_defconfig2
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/boards/mach-sh03/rtc.c1
-rw-r--r--arch/sh/configs/landisk_defconfig9
-rw-r--r--arch/sh/configs/microdev_defconfig2
-rw-r--r--arch/sh/configs/sdk7780_defconfig6
-rw-r--r--arch/sh/configs/sdk7786_defconfig3
-rw-r--r--arch/sh/configs/se7750_defconfig1
-rw-r--r--arch/sh/configs/sh03_defconfig3
-rw-r--r--arch/sh/drivers/dma/Kconfig3
-rw-r--r--arch/sh/include/asm/Kbuild1
-rw-r--r--arch/sh/include/asm/gpio.h1
-rw-r--r--arch/sh/kernel/cpu/sh3/entry.S1
-rw-r--r--arch/sh/mm/Kconfig2
-rw-r--r--arch/sh/mm/asids-debugfs.c15
-rw-r--r--arch/sh/mm/cache-debugfs.c15
-rw-r--r--arch/sh/mm/pmb.c15
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/highmem.h9
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/entry/common.c10
-rw-r--r--arch/x86/hyperv/hv_init.c33
-rw-r--r--arch/x86/hyperv/mmu.c12
-rw-r--r--arch/x86/include/asm/fpu/api.h15
-rw-r--r--arch/x86/include/asm/intel-family.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h19
-rw-r--r--arch/x86/include/asm/local64.h1
-rw-r--r--arch/x86/include/asm/mshyperv.h2
-rw-r--r--arch/x86/include/asm/msr.h4
-rw-r--r--arch/x86/include/asm/topology.h4
-rw-r--r--arch/x86/kernel/cpu/amd.c4
-rw-r--r--arch/x86/kernel/cpu/mce/core.c7
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c18
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c6
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c117
-rw-r--r--arch/x86/kernel/cpu/topology.c2
-rw-r--r--arch/x86/kernel/fpu/core.c9
-rw-r--r--arch/x86/kernel/setup.c20
-rw-r--r--arch/x86/kernel/sev-es-shared.c4
-rw-r--r--arch/x86/kernel/sev-es.c14
-rw-r--r--arch/x86/kernel/smpboot.c19
-rw-r--r--arch/x86/kvm/lapic.c4
-rw-r--r--arch/x86/kvm/mmu.h2
-rw-r--r--arch/x86/kvm/mmu/mmu.c53
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c113
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.h4
-rw-r--r--arch/x86/kvm/svm/nested.c8
-rw-r--r--arch/x86/kvm/svm/sev.c24
-rw-r--r--arch/x86/kvm/svm/svm.c12
-rw-r--r--arch/x86/kvm/svm/svm.h2
-rw-r--r--arch/x86/kvm/vmx/nested.c2
-rw-r--r--arch/x86/kvm/vmx/vmx.c2
-rw-r--r--arch/x86/kvm/x86.c30
-rw-r--r--arch/x86/lib/mmx_32.c20
-rw-r--r--arch/x86/mm/pgtable.c2
-rw-r--r--arch/x86/xen/enlighten_hvm.c15
-rw-r--r--arch/x86/xen/smp_hvm.c29
-rw-r--r--arch/xtensa/include/asm/Kbuild1
-rw-r--r--block/bfq-iosched.c8
-rw-r--r--block/blk-core.c13
-rw-r--r--block/blk-iocost.c16
-rw-r--r--block/blk-mq-debugfs.c3
-rw-r--r--block/blk-mq.c4
-rw-r--r--block/blk-pm.c15
-rw-r--r--block/blk-pm.h14
-rw-r--r--block/genhd.c11
-rw-r--r--crypto/asymmetric_keys/asym_tpm.c2
-rw-r--r--crypto/asymmetric_keys/public_key.c3
-rw-r--r--crypto/ecdh.c3
-rw-r--r--crypto/xor.c2
-rw-r--r--drivers/acpi/Kconfig6
-rw-r--r--drivers/acpi/internal.h2
-rw-r--r--drivers/acpi/scan.c17
-rw-r--r--drivers/acpi/x86/s2idle.c14
-rw-r--r--drivers/atm/idt77252.c2
-rw-r--r--drivers/base/core.c53
-rw-r--r--drivers/base/dd.c9
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/base/regmap/regmap-debugfs.c9
-rw-r--r--drivers/block/Kconfig1
-rw-r--r--drivers/block/rnbd/Kconfig1
-rw-r--r--drivers/block/rnbd/README1
-rw-r--r--drivers/block/rnbd/rnbd-clt.c18
-rw-r--r--drivers/block/rnbd/rnbd-srv.c8
-rw-r--r--drivers/clk/tegra/clk-tegra30.c2
-rw-r--r--drivers/counter/ti-eqep.c35
-rw-r--r--drivers/cpufreq/intel_pstate.c16
-rw-r--r--drivers/cpufreq/powernow-k8.c9
-rw-r--r--drivers/crypto/Kconfig1
-rw-r--r--drivers/dma-buf/Kconfig8
-rw-r--r--drivers/dma-buf/dma-buf.c141
-rw-r--r--drivers/dma-buf/heaps/cma_heap.c3
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c4
-rw-r--r--drivers/dma/idxd/sysfs.c4
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c1
-rw-r--r--drivers/dma/milbeaut-xdmac.c4
-rw-r--r--drivers/dma/qcom/bam_dma.c6
-rw-r--r--drivers/dma/qcom/gpi.c10
-rw-r--r--drivers/dma/stm32-mdma.c2
-rw-r--r--drivers/dma/ti/k3-udma.c6
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c11
-rw-r--r--drivers/gpio/Kconfig5
-rw-r--r--drivers/gpio/gpio-mvebu.c19
-rw-r--r--drivers/gpio/gpiolib-cdev.c145
-rw-r--r--drivers/gpio/gpiolib.c3
-rw-r--r--drivers/gpu/drm/Kconfig5
-rw-r--r--drivers/gpu/drm/Makefile6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c220
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c176
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_1.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c137
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c212
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v10_1.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c478
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c206
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h154
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c483
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c703
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.h30
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c54
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c261
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h46
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c54
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dc_common.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dc_common.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c119
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c79
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c55
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c99
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/Makefile5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c83
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c1
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c13
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h345
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h1300
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h5
-rw-r--r--drivers/gpu/drm/amd/include/renoir_ip_offset.h2
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c191
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h542
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h1
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_types.h3
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v11_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c166
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c137
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c11
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c22
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c332
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c1176
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h26
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c220
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c58
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h2
-rw-r--r--drivers/gpu/drm/arc/arcpgu_crtc.c9
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c1
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c1
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c1
-rw-r--r--drivers/gpu/drm/ast/ast_cursor.c55
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c4
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/ast/ast_main.c25
-rw-r--r--drivers/gpu/drm/ast/ast_mm.c17
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c6
-rw-r--r--drivers/gpu/drm/ast/ast_post.c8
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c1
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c1
-rw-r--r--drivers/gpu/drm/bochs/bochs_hw.c4
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c6
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c46
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c6
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c2
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c67
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c81
-rw-r--r--drivers/gpu/drm/drm_blend.c6
-rw-r--r--drivers/gpu/drm/drm_bufs.c4
-rw-r--r--drivers/gpu/drm/drm_cache.c33
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c125
-rw-r--r--drivers/gpu/drm/drm_crtc.c130
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c650
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c29
-rw-r--r--drivers/gpu/drm/drm_drv.c16
-rw-r--r--drivers/gpu/drm/drm_dsc.c30
-rw-r--r--drivers/gpu/drm/drm_dumb_buffers.c8
-rw-r--r--drivers/gpu/drm/drm_edid.c110
-rw-r--r--drivers/gpu/drm/drm_encoder.c113
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c25
-rw-r--r--drivers/gpu/drm/drm_file.c8
-rw-r--r--drivers/gpu/drm/drm_gem.c31
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c141
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c14
-rw-r--r--drivers/gpu/drm/drm_internal.h3
-rw-r--r--drivers/gpu/drm/drm_irq.c44
-rw-r--r--drivers/gpu/drm/drm_legacy.h2
-rw-r--r--drivers/gpu/drm/drm_memory.c51
-rw-r--r--drivers/gpu/drm/drm_mode_config.c51
-rw-r--r--drivers/gpu/drm/drm_modes.c4
-rw-r--r--drivers/gpu/drm/drm_pci.c59
-rw-r--r--drivers/gpu/drm/drm_plane.c179
-rw-r--r--drivers/gpu/drm/drm_prime.c66
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c3
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c14
-rw-r--r--drivers/gpu/drm/drm_syncobj.c8
-rw-r--r--drivers/gpu/drm/drm_vblank.c2
-rw-r--r--drivers/gpu/drm/drm_vm.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c3
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c30
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c5
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c11
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c15
-rw-r--r--drivers/gpu/drm/gma500/gem.c6
-rw-r--r--drivers/gpu/drm/gma500/gem.h2
-rw-r--r--drivers/gpu/drm/gma500/gma_device.c4
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c12
-rw-r--r--drivers/gpu/drm/gma500/gtt.c20
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c8
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c4
-rw-r--r--drivers/gpu/drm/gma500/intel_i2c.c5
-rw-r--r--drivers/gpu/drm/gma500/mdfld_device.c4
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.c8
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c4
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c6
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c9
-rw-r--r--drivers/gpu/drm/gma500/mmu.c15
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c6
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c5
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c22
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c5
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c10
-rw-r--r--drivers/gpu/drm/gma500/opregion.c3
-rw-r--r--drivers/gpu/drm/gma500/power.c21
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c18
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h8
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c7
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_modes.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c8
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c3
-rw-r--r--drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c36
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Makefile2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c104
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h10
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c20
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c61
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug22
-rw-r--r--drivers/gpu/drm/i915/Makefile27
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c899
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.h24
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c80
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c325
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c806
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c249
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c4579
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c133
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c133
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h205
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c2785
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c692
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c404
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c178
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c74
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c1363
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h23
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c683
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c309
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c278
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c162
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c553
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c1406
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.h52
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c143
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c213
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c209
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h33
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c16
-rw-r--r--drivers/gpu/drm/i915/dma_resv_utils.c17
-rw-r--r--drivers/gpu/drm/i915/dma_resv_utils.h13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c132
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_create.c113
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c104
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c28
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c15
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.h8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c112
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h44
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h19
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c51
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c41
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.c52
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c28
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c75
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c12
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c54
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c22
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c4
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c1
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c1
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c1
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt_pm.c27
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c15
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.c170
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c635
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.h127
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c115
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.h17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_sseu.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h93
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c167
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c59
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_stats.h60
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c3896
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.h47
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c197
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c49
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c5418
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.h167
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc_reg.h41
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c (renamed from drivers/gpu/drm/i915/intel_region_lmem.c)4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.h (renamed from drivers/gpu/drm/i915/intel_region_lmem.h)2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c101
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c278
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c61
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c113
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.h25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c646
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c15
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_cs.c1
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c13
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_pm.c203
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c4741
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_gt_pm.c8
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c173
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c4701
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_mocs.c80
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c1
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c28
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c5
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c14
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c196
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c16
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c457
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c54
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c336
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c81
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h6
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h11
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h41
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.h12
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c24
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c9
-rw-r--r--drivers/gpu/drm/i915/i915_active.c7
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c30
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c763
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c25
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h121
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c132
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c13
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c25
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c6
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c475
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h3
-rw-r--r--drivers/gpu/drm/i915/i915_mitigations.c146
-rw-r--r--drivers/gpu/drm/i915/i915_mitigations.h13
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c2
-rw-r--r--drivers/gpu/drm/i915/i915_params.c2
-rw-r--r--drivers/gpu/drm/i915/i915_params.h1
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c7
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c17
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c117
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h35
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h74
-rw-r--r--drivers/gpu/drm/i915/i915_request.c178
-rw-r--r--drivers/gpu/drm/i915/i915_request.h47
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c32
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h7
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler_types.h10
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c33
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c25
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c2
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h7
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h15
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h3
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c159
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h4
-rw-r--r--drivers/gpu/drm/i915/intel_dram.c136
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c2
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h13
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c39
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1108
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h7
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c4
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c4
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h6
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c28
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c110
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.c19
-rw-r--r--drivers/gpu/drm/imx/Kconfig3
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c95
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c109
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c109
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c131
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c69
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h3
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c93
-rw-r--r--drivers/gpu/drm/ingenic/Kconfig1
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c60
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.h14
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c4
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c2
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c14
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c20
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_i2c.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mm.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c21
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c7
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c12
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c15
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h14
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c234
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head907d.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimm.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c106
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/disp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c140
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c364
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c76
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c118
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c74
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c113
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c6
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig120
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile19
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig10
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c1385
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Kconfig135
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile20
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c87
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c202
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c60
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c1949
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.h456
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c28
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h72
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c229
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h347
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c57
-rw-r--r--drivers/gpu/drm/omapdrm/dss/pll.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c157
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.h28
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c153
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c73
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c59
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c34
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c41
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.c1
-rw-r--r--drivers/gpu/drm/panel/Kconfig20
-rw-r--r--drivers/gpu/drm/panel/Makefile2
-rw-r--r--drivers/gpu/drm/panel/panel-dsi-cm.c665
-rw-r--r--drivers/gpu/drm/panel/panel-khadas-ts050.c870
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c39
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c59
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c223
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c24
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c1
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_dev.h16
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h1
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c1
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c6
-rw-r--r--drivers/gpu/drm/radeon/r100.c27
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h36
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c89
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c55
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c38
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c7
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_cmm.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c13
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c33
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h16
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c98
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c42
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c8
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c28
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_writeback.c2
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig2
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c1
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c123
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c9
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c9
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c9
-rw-r--r--drivers/gpu/drm/stm/ltdc.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c21
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.c109
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h6
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c2
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/tegra/dc.c2
-rw-r--r--drivers/gpu/drm/tegra/drm.c2
-rw-r--r--drivers/gpu/drm/tegra/dsi.c2
-rw-r--r--drivers/gpu/drm/tegra/falcon.c9
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c9
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c2
-rw-r--r--drivers/gpu/drm/tegra/hub.c2
-rw-r--r--drivers/gpu/drm/tegra/sor.c2
-rw-r--r--drivers/gpu/drm/tegra/vic.c35
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c105
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c16
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.h (renamed from include/drm/ttm/ttm_module.h)0
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c35
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c15
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c1
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c1
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c7
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c11
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_irq.c4
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c8
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c7
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c111
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c38
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c19
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h30
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c111
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c166
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h23
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_phy.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_regs.h9
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c18
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c248
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c7
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c11
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c3
-rw-r--r--drivers/gpu/drm/via/via_irq.c2
-rw-r--r--drivers/gpu/drm/virtio/Kconfig3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c24
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c81
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vram.c3
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c54
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h12
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile6
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c52
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c (renamed from drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c)126
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c22
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c40
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c236
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h94
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c26
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c76
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_marker.c155
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c36
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c47
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_thp.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c10
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c4
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c7
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c4
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_client.c8
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.h2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h2
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-input.c2
-rw-r--r--drivers/hid/hid-logitech-dj.c4
-rw-r--r--drivers/hid/hid-logitech-hidpp.c2
-rw-r--r--drivers/hid/hid-multitouch.c4
-rw-r--r--drivers/hid/hid-uclogic-params.c2
-rw-r--r--drivers/hid/hid-wiimote-core.c2
-rw-r--r--drivers/hid/wacom_sys.c35
-rw-r--r--drivers/hv/vmbus_drv.c2
-rw-r--r--drivers/hwmon/amd_energy.c3
-rw-r--r--drivers/hwmon/pwm-fan.c12
-rw-r--r--drivers/hwtracing/intel_th/pci.c5
-rw-r--r--drivers/hwtracing/stm/heartbeat.c6
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-i801.c2
-rw-r--r--drivers/i2c/busses/i2c-imx.c20
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c27
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.c2
-rw-r--r--drivers/i2c/busses/i2c-sprd.c8
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c24
-rw-r--r--drivers/ide/ide-atapi.c1
-rw-r--r--drivers/ide/ide-io.c7
-rw-r--r--drivers/ide/ide-pm.c2
-rw-r--r--drivers/idle/intel_idle.c41
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c6
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c31
-rw-r--r--drivers/iio/dac/ad5504.c4
-rw-r--r--drivers/iio/proximity/sx9310.c5
-rw-r--r--drivers/iio/temperature/mlx90632.c6
-rw-r--r--drivers/infiniband/core/cma_configfs.c4
-rw-r--r--drivers/infiniband/core/restrack.c1
-rw-r--r--drivers/infiniband/core/ucma.c135
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c3
-rw-r--r--drivers/interconnect/imx/imx.c3
-rw-r--r--drivers/interconnect/imx/imx8mq.c2
-rw-r--r--drivers/interconnect/qcom/Kconfig23
-rw-r--r--drivers/iommu/amd/init.c3
-rw-r--r--drivers/iommu/amd/iommu.c3
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c4
-rw-r--r--drivers/iommu/dma-iommu.c27
-rw-r--r--drivers/iommu/intel/dmar.c4
-rw-r--r--drivers/iommu/intel/iommu.c149
-rw-r--r--drivers/iommu/intel/irq_remapping.c2
-rw-r--r--drivers/iommu/intel/svm.c45
-rw-r--r--drivers/iommu/iova.c8
-rw-r--r--drivers/irqchip/Kconfig5
-rw-r--r--drivers/irqchip/irq-bcm2836.c4
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c4
-rw-r--r--drivers/irqchip/irq-mips-cpu.c7
-rw-r--r--drivers/irqchip/irq-sl28cpld.c2
-rw-r--r--drivers/isdn/mISDN/Kconfig1
-rw-r--r--drivers/lightnvm/Kconfig1
-rw-r--r--drivers/lightnvm/core.c3
-rw-r--r--drivers/md/Kconfig2
-rw-r--r--drivers/md/bcache/features.c2
-rw-r--r--drivers/md/bcache/features.h30
-rw-r--r--drivers/md/bcache/super.c53
-rw-r--r--drivers/md/dm-bufio.c6
-rw-r--r--drivers/md/dm-crypt.c177
-rw-r--r--drivers/md/dm-integrity.c92
-rw-r--r--drivers/md/dm-raid.c6
-rw-r--r--drivers/md/dm-snap.c24
-rw-r--r--drivers/md/dm-table.c15
-rw-r--r--drivers/md/dm.c2
-rw-r--r--drivers/md/md.c2
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c7
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c77
-rw-r--r--drivers/misc/habanalabs/common/device.c19
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c65
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h5
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c1
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c9
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c5
-rw-r--r--drivers/misc/habanalabs/common/memory.c10
-rw-r--r--drivers/misc/habanalabs/common/mmu.c6
-rw-r--r--drivers/misc/habanalabs/common/mmu_v1.c12
-rw-r--r--drivers/misc/habanalabs/common/pci.c28
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c194
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h7
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_coresight.c3
-rw-r--r--drivers/misc/habanalabs/goya/goya.c78
-rw-r--r--drivers/misc/habanalabs/include/common/hl_boot_if.h9
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c3
-rw-r--r--drivers/misc/pvpanic.c19
-rw-r--r--drivers/mmc/core/queue.c4
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c6
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c27
-rw-r--r--drivers/mmc/host/sdhci-xenon.c7
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c2
-rw-r--r--drivers/mtd/nand/raw/intel-nand-controller.c5
-rw-r--r--drivers/mtd/nand/raw/nandsim.c7
-rw-r--r--drivers/mtd/nand/raw/omap2.c15
-rw-r--r--drivers/mtd/nand/spi/core.c14
-rw-r--r--drivers/net/bareudp.c25
-rw-r--r--drivers/net/can/Kconfig1
-rw-r--r--drivers/net/can/dev.c4
-rw-r--r--drivers/net/can/m_can/m_can.c2
-rw-r--r--drivers/net/can/m_can/tcan4x5x.c26
-rw-r--r--drivers/net/can/rcar/Kconfig4
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c19
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c8
-rw-r--r--drivers/net/can/vxcan.c6
-rw-r--r--drivers/net/dsa/b53/b53_common.c2
-rw-r--r--drivers/net/dsa/hirschmann/Kconfig1
-rw-r--r--drivers/net/dsa/lantiq_gswip.c34
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_vtu.c4
-rw-r--r--drivers/net/ethernet/aquantia/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c8
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h7
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h4
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c103
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c41
-rw-r--r--drivers/net/ethernet/ethoc.c3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c1
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c3
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h9
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c10
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c46
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c17
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c59
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c29
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c38
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c13
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c23
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c4
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c12
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c2
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c7
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c6
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c129
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c52
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c20
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ipa/gsi.c127
-rw-r--r--drivers/net/ipa/ipa_clock.c4
-rw-r--r--drivers/net/ipa/ipa_modem.c1
-rw-r--r--drivers/net/mdio/mdio-bitbang.c6
-rw-r--r--drivers/net/phy/smsc.c3
-rw-r--r--drivers/net/ppp/ppp_generic.c12
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/cdc_ncm.c23
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c1
-rw-r--r--drivers/net/usb/r8153_ecm.c8
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/virtio_net.c12
-rw-r--r--drivers/net/wan/Kconfig1
-rw-r--r--drivers/net/wan/hdlc_ppp.c7
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c10
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c44
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c24
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c8
-rw-r--r--drivers/nvme/host/core.c36
-rw-r--r--drivers/nvme/host/fc.c15
-rw-r--r--drivers/nvme/host/nvme.h9
-rw-r--r--drivers/nvme/host/pci.c129
-rw-r--r--drivers/nvme/host/rdma.c15
-rw-r--r--drivers/nvme/host/tcp.c30
-rw-r--r--drivers/nvme/target/admin-cmd.c8
-rw-r--r--drivers/nvme/target/fcloop.c7
-rw-r--r--drivers/nvme/target/rdma.c26
-rw-r--r--drivers/opp/core.c9
-rw-r--r--drivers/pci/pci.c12
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/perf/arm_pmu.c5
-rw-r--r--drivers/phy/ingenic/Makefile2
-rw-r--r--drivers/phy/mediatek/Kconfig4
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c19
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c4
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c1
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c80
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c96
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h2
-rw-r--r--drivers/platform/surface/Kconfig8
-rw-r--r--drivers/platform/surface/surface_gpe.c4
-rw-r--r--drivers/platform/x86/amd-pmc.c2
-rw-r--r--drivers/platform/x86/hp-wmi.c3
-rw-r--r--drivers/platform/x86/i2c-multi-instantiate.c31
-rw-r--r--drivers/platform/x86/ideapad-laptop.c15
-rw-r--r--drivers/platform/x86/intel-vbtn.c10
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c5
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c18
-rw-r--r--drivers/ptp/Kconfig2
-rw-r--r--drivers/regulator/Kconfig1
-rw-r--r--drivers/regulator/bd718x7-regulator.c57
-rw-r--r--drivers/regulator/pf8x00-regulator.c8
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c2
-rw-r--r--drivers/s390/net/qeth_core.h3
-rw-r--r--drivers/s390/net/qeth_core_main.c38
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c4
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/Kconfig1
-rw-r--r--drivers/scsi/fnic/vnic_dev.c8
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c11
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c66
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c8
-rw-r--r--drivers/scsi/libfc/fc_exch.c16
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c45
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c29
-rw-r--r--drivers/scsi/mpt3sas/Kconfig2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c2
-rw-r--r--drivers/scsi/qedi/qedi_main.c4
-rw-r--r--drivers/scsi/scsi_debug.c5
-rw-r--r--drivers/scsi/scsi_lib.c27
-rw-r--r--drivers/scsi/scsi_transport_spi.c27
-rw-r--r--drivers/scsi/scsi_transport_srp.c9
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/ufs/Kconfig1
-rw-r--r--drivers/scsi/ufs/ufs-mediatek-trace.h2
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c21
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.h1
-rw-r--r--drivers/scsi/ufs/ufs.h2
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c73
-rw-r--r--drivers/scsi/ufs/ufshcd.c102
-rw-r--r--drivers/scsi/ufs/ufshcd.h14
-rw-r--r--drivers/sh/intc/core.c2
-rw-r--r--drivers/sh/intc/virq-debugfs.c14
-rw-r--r--drivers/soc/litex/litex_soc_ctrl.c3
-rw-r--r--drivers/spi/spi-altera.c26
-rw-r--r--drivers/spi/spi-cadence.c6
-rw-r--r--drivers/spi/spi-fsl-spi.c5
-rw-r--r--drivers/spi/spi-geni-qcom.c84
-rw-r--r--drivers/spi/spi-stm32.c4
-rw-r--r--drivers/spi/spi.c11
-rw-r--r--drivers/staging/comedi/comedi_fops.c4
-rw-r--r--drivers/staging/hikey9xx/hisi-spmi-controller.c21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c20
-rw-r--r--drivers/staging/mt7621-dma/mtk-hsdma.c4
-rw-r--r--drivers/target/target_core_user.c11
-rw-r--r--drivers/target/target_core_xcopy.c119
-rw-r--r--drivers/target/target_core_xcopy.h1
-rw-r--r--drivers/thunderbolt/icm.c2
-rw-r--r--drivers/tty/Kconfig14
-rw-r--r--drivers/tty/Makefile3
-rw-r--r--drivers/tty/serial/mvebu-uart.c10
-rw-r--r--drivers/tty/serial/sifive.c1
-rw-r--r--drivers/tty/tty_io.c51
-rw-r--r--drivers/tty/ttynull.c18
-rw-r--r--drivers/usb/cdns3/cdns3-imx.c22
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c6
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/class/cdc-wdm.c16
-rw-r--r--drivers/usb/class/usblp.c21
-rw-r--r--drivers/usb/core/hcd.c8
-rw-r--r--drivers/usb/dwc3/core.h1
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c2
-rw-r--r--drivers/usb/dwc3/gadget.c16
-rw-r--r--drivers/usb/dwc3/ulpi.c38
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/composite.c10
-rw-r--r--drivers/usb/gadget/configfs.c19
-rw-r--r--drivers/usb/gadget/function/f_printer.c1
-rw-r--r--drivers/usb/gadget/function/f_uac2.c69
-rw-r--r--drivers/usb/gadget/function/u_ether.c9
-rw-r--r--drivers/usb/gadget/legacy/acm_ms.c4
-rw-r--r--drivers/usb/gadget/udc/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/Makefile1
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c5
-rw-r--r--drivers/usb/gadget/udc/bdc/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/core.c16
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c45
-rw-r--r--drivers/usb/gadget/udc/fsl_mxc_udc.c122
-rw-r--r--drivers/usb/host/ehci-hcd.c12
-rw-r--r--drivers/usb/host/ehci-hub.c3
-rw-r--r--drivers/usb/host/xhci-ring.c2
-rw-r--r--drivers/usb/host/xhci-tegra.c7
-rw-r--r--drivers/usb/host/xhci.c24
-rw-r--r--drivers/usb/misc/yurex.c3
-rw-r--r--drivers/usb/serial/iuu_phoenix.c20
-rw-r--r--drivers/usb/serial/option.c3
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/typec/altmodes/Kconfig2
-rw-r--r--drivers/usb/typec/class.c2
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c11
-rw-r--r--drivers/usb/usbip/vhci_hcd.c2
-rw-r--r--drivers/vhost/net.c6
-rw-r--r--drivers/vhost/vsock.c68
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c11
-rw-r--r--drivers/video/fbdev/aty/mach64_ct.c15
-rw-r--r--drivers/video/fbdev/aty/radeon_monitor.c4
-rw-r--r--drivers/video/fbdev/bw2.c2
-rw-r--r--drivers/video/fbdev/cg3.c2
-rw-r--r--drivers/video/fbdev/cg6.c2
-rw-r--r--drivers/video/fbdev/cirrusfb.c20
-rw-r--r--drivers/video/fbdev/controlfb.c4
-rw-r--r--drivers/video/fbdev/core/fb_notify.c7
-rw-r--r--drivers/video/fbdev/core/fbcon.c25
-rw-r--r--drivers/video/fbdev/core/fbmon.c2
-rw-r--r--drivers/video/fbdev/efifb.c3
-rw-r--r--drivers/video/fbdev/ffb.c2
-rw-r--r--drivers/video/fbdev/gbefb.c4
-rw-r--r--drivers/video/fbdev/goldfishfb.c2
-rw-r--r--drivers/video/fbdev/hgafb.c10
-rw-r--r--drivers/video/fbdev/leo.c2
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_spi.c2
-rw-r--r--drivers/video/fbdev/mx3fb.c13
-rw-r--r--drivers/video/fbdev/neofb.c4
-rw-r--r--drivers/video/fbdev/nvidia/nv_setup.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/Kconfig1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c14
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c4
-rw-r--r--drivers/video/fbdev/p9100.c2
-rw-r--r--drivers/video/fbdev/pm2fb.c8
-rw-r--r--drivers/video/fbdev/riva/fbdev.c9
-rw-r--r--drivers/video/fbdev/riva/riva_hw.c28
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c3
-rw-r--r--drivers/video/fbdev/s3c-fb.c11
-rw-r--r--drivers/video/fbdev/sis/init.c33
-rw-r--r--drivers/video/fbdev/sis/oem310.h2
-rw-r--r--drivers/video/fbdev/sis/sis.h1
-rw-r--r--drivers/video/fbdev/sis/sis_main.c9
-rw-r--r--drivers/video/fbdev/sstfb.c2
-rw-r--r--drivers/video/fbdev/tcx.c2
-rw-r--r--drivers/video/fbdev/tdfxfb.c4
-rw-r--r--drivers/video/fbdev/tgafb.c7
-rw-r--r--drivers/video/fbdev/udlfb.c1
-rw-r--r--drivers/video/fbdev/uvesafb.c6
-rw-r--r--drivers/video/fbdev/via/lcd.c4
-rw-r--r--drivers/video/fbdev/wmt_ge_rops.c1
-rw-r--r--drivers/video/of_display_timing.c1
-rw-r--r--drivers/video/of_videomode.c6
-rw-r--r--drivers/xen/events/events_base.c10
-rw-r--r--drivers/xen/platform-pci.c8
-rw-r--r--drivers/xen/privcmd.c25
-rw-r--r--drivers/xen/xenbus/xenbus.h1
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c8
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c81
-rw-r--r--fs/afs/dir.c49
-rw-r--r--fs/afs/dir_edit.c6
-rw-r--r--fs/afs/xdr_fs.h25
-rw-r--r--fs/block_dev.c9
-rw-r--r--fs/btrfs/backref.c2
-rw-r--r--fs/btrfs/block-group.c3
-rw-r--r--fs/btrfs/btrfs_inode.h9
-rw-r--r--fs/btrfs/ctree.c24
-rw-r--r--fs/btrfs/ctree.h29
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/discard.c70
-rw-r--r--fs/btrfs/disk-io.c15
-rw-r--r--fs/btrfs/extent-tree.c12
-rw-r--r--fs/btrfs/extent_io.c4
-rw-r--r--fs/btrfs/file-item.c2
-rw-r--r--fs/btrfs/inode.c69
-rw-r--r--fs/btrfs/ioctl.c2
-rw-r--r--fs/btrfs/print-tree.c10
-rw-r--r--fs/btrfs/print-tree.h2
-rw-r--r--fs/btrfs/qgroup.c43
-rw-r--r--fs/btrfs/reflink.c15
-rw-r--r--fs/btrfs/relocation.c7
-rw-r--r--fs/btrfs/send.c64
-rw-r--r--fs/btrfs/space-info.c4
-rw-r--r--fs/btrfs/super.c40
-rw-r--r--fs/btrfs/tests/btrfs-tests.c10
-rw-r--r--fs/btrfs/tests/inode-tests.c9
-rw-r--r--fs/btrfs/transaction.c8
-rw-r--r--fs/btrfs/tree-checker.c7
-rw-r--r--fs/btrfs/volumes.c6
-rw-r--r--fs/cachefiles/rdwr.c2
-rw-r--r--fs/ceph/mds_client.c87
-rw-r--r--fs/cifs/connect.c6
-rw-r--r--fs/cifs/dfs_cache.c3
-rw-r--r--fs/cifs/fs_context.c4
-rw-r--r--fs/cifs/smb2pdu.c2
-rw-r--r--fs/cifs/smb2pdu.h2
-rw-r--r--fs/cifs/transport.c4
-rw-r--r--fs/ext4/ext4_jbd2.c17
-rw-r--r--fs/ext4/ext4_jbd2.h5
-rw-r--r--fs/ext4/fast_commit.c35
-rw-r--r--fs/ext4/file.c7
-rw-r--r--fs/ext4/inode.c6
-rw-r--r--fs/ext4/ioctl.c3
-rw-r--r--fs/ext4/namei.c27
-rw-r--r--fs/ext4/resize.c20
-rw-r--r--fs/ext4/super.c190
-rw-r--r--fs/ext4/xattr.c5
-rw-r--r--fs/file.c2
-rw-r--r--fs/fs-writeback.c24
-rw-r--r--fs/io_uring.c421
-rw-r--r--fs/kernfs/file.c65
-rw-r--r--fs/namespace.c7
-rw-r--r--fs/nfs/delegation.c12
-rw-r--r--fs/nfs/internal.h38
-rw-r--r--fs/nfs/nfs4proc.c28
-rw-r--r--fs/nfs/nfs4super.c4
-rw-r--r--fs/nfs/pnfs.c67
-rw-r--r--fs/nfs/pnfs.h8
-rw-r--r--fs/nfs/pnfs_nfs.c22
-rw-r--r--fs/nfsd/nfs3xdr.c7
-rw-r--r--fs/nfsd/nfs4proc.c5
-rw-r--r--fs/nfsd/nfs4xdr.c56
-rw-r--r--fs/nfsd/nfssvc.c6
-rw-r--r--fs/nfsd/xdr4.h1
-rw-r--r--fs/notify/fanotify/fanotify_user.c17
-rw-r--r--fs/proc/proc_sysctl.c7
-rw-r--r--fs/proc/task_mmu.c53
-rw-r--r--fs/select.c14
-rw-r--r--fs/udf/super.c7
-rw-r--r--fs/zonefs/Kconfig1
-rw-r--r--include/asm-generic/Kbuild1
-rw-r--r--include/asm-generic/bitops/atomic.h6
-rw-r--r--include/drm/drm_agpsupport.h18
-rw-r--r--include/drm/drm_atomic.h20
-rw-r--r--include/drm/drm_atomic_helper.h4
-rw-r--r--include/drm/drm_connector.h49
-rw-r--r--include/drm/drm_crtc.h33
-rw-r--r--include/drm/drm_device.h23
-rw-r--r--include/drm/drm_dp_helper.h239
-rw-r--r--include/drm/drm_dp_mst_helper.h1
-rw-r--r--include/drm/drm_drv.h2
-rw-r--r--include/drm/drm_dsc.h1
-rw-r--r--include/drm/drm_edid.h30
-rw-r--r--include/drm/drm_encoder.h32
-rw-r--r--include/drm/drm_gem.h3
-rw-r--r--include/drm/drm_gem_cma_helper.h14
-rw-r--r--include/drm/drm_hdcp.h8
-rw-r--r--include/drm/drm_irq.h2
-rw-r--r--include/drm/drm_legacy.h10
-rw-r--r--include/drm/drm_managed.h2
-rw-r--r--include/drm/drm_mipi_dbi.h2
-rw-r--r--include/drm/drm_modes.h10
-rw-r--r--include/drm/drm_modeset_helper_vtables.h29
-rw-r--r--include/drm/drm_plane.h42
-rw-r--r--include/drm/drm_prime.h7
-rw-r--r--include/drm/drm_property.h2
-rw-r--r--include/drm/drm_rect.h13
-rw-r--r--include/drm/drm_simple_kms_helper.h24
-rw-r--r--include/drm/gpu_scheduler.h14
-rw-r--r--include/drm/ttm/ttm_bo_api.h13
-rw-r--r--include/drm/ttm/ttm_bo_driver.h6
-rw-r--r--include/drm/ttm/ttm_resource.h5
-rw-r--r--include/drm/ttm/ttm_tt.h2
-rw-r--r--include/kvm/arm_pmu.h2
-rw-r--r--include/linux/acpi.h7
-rw-r--r--include/linux/blk-mq.h4
-rw-r--r--include/linux/blkdev.h18
-rw-r--r--include/linux/build_bug.h5
-rw-r--r--include/linux/ceph/msgr.h4
-rw-r--r--include/linux/compiler-gcc.h6
-rw-r--r--include/linux/compiler_attributes.h6
-rw-r--r--include/linux/compiler_types.h6
-rw-r--r--include/linux/console.h3
-rw-r--r--include/linux/device.h12
-rw-r--r--include/linux/dm-bufio.h1
-rw-r--r--include/linux/dma-buf.h45
-rw-r--r--include/linux/intel-iommu.h18
-rw-r--r--include/linux/kasan.h6
-rw-r--r--include/linux/kcov.h21
-rw-r--r--include/linux/kdev_t.h22
-rw-r--r--include/linux/kthread.h3
-rw-r--r--include/linux/ktime.h1
-rw-r--r--include/linux/mdio-bitbang.h3
-rw-r--r--include/linux/memcontrol.h2
-rw-r--r--include/linux/mlx5/mlx5_ifc.h3
-rw-r--r--include/linux/mm.h12
-rw-r--r--include/linux/nvme.h6
-rw-r--r--include/linux/pci.h9
-rw-r--r--include/linux/perf/arm_pmu.h2
-rw-r--r--include/linux/rcupdate.h6
-rw-r--r--include/linux/sizes.h3
-rw-r--r--include/linux/skbuff.h3
-rw-r--r--include/linux/syscalls.h24
-rw-r--r--include/linux/timekeeping32.h14
-rw-r--r--include/linux/usb/usbnet.h2
-rw-r--r--include/linux/vgaarb.h6
-rw-r--r--include/net/cfg80211.h5
-rw-r--r--include/net/inet_connection_sock.h3
-rw-r--r--include/net/mac80211.h1
-rw-r--r--include/net/red.h4
-rw-r--r--include/net/sock.h17
-rw-r--r--include/net/xdp_sock.h4
-rw-r--r--include/net/xsk_buff_pool.h5
-rw-r--r--include/soc/nps/common.h172
-rw-r--r--include/soc/nps/mtm.h59
-rw-r--r--include/trace/events/afs.h2
-rw-r--r--include/trace/events/sched.h2
-rw-r--r--include/trace/events/sunrpc.h59
-rw-r--r--include/uapi/drm/drm.h97
-rw-r--r--include/uapi/drm/drm_fourcc.h23
-rw-r--r--include/uapi/drm/drm_mode.h13
-rw-r--r--include/uapi/drm/i915_drm.h3
-rw-r--r--include/uapi/linux/bcache.h2
-rw-r--r--include/uapi/linux/if_link.h5
-rw-r--r--include/uapi/linux/kvm.h2
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h3
-rw-r--r--include/uapi/linux/ppp-ioctl.h2
-rw-r--r--include/uapi/misc/habanalabs.h5
-rw-r--r--include/video/sstfb.h4
-rw-r--r--include/xen/xenbus.h2
-rw-r--r--init/main.c11
-rw-r--r--kernel/bpf/bpf_inode_storage.c9
-rw-r--r--kernel/bpf/bpf_task_storage.c5
-rw-r--r--kernel/bpf/btf.c2
-rw-r--r--kernel/bpf/cgroup.c5
-rw-r--r--kernel/bpf/hashtab.c1
-rw-r--r--kernel/bpf/helpers.c2
-rw-r--r--kernel/bpf/syscall.c7
-rw-r--r--kernel/bpf/task_iter.c17
-rw-r--r--kernel/bpf/verifier.c8
-rw-r--r--kernel/cgroup/cgroup-v1.c2
-rw-r--r--kernel/cgroup/cgroup.c30
-rw-r--r--kernel/configs/android-recommended.config1
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/fork.c6
-rw-r--r--kernel/irq/manage.c1
-rw-r--r--kernel/irq/msi.c2
-rw-r--r--kernel/kthread.c29
-rw-r--r--kernel/locking/lockdep.c9
-rw-r--r--kernel/printk/printk.c40
-rw-r--r--kernel/printk/printk_ringbuffer.c2
-rw-r--r--kernel/rcu/tasks.h25
-rw-r--r--kernel/sched/core.c111
-rw-r--r--kernel/sched/sched.h1
-rw-r--r--kernel/signal.c6
-rw-r--r--kernel/smpboot.c1
-rw-r--r--kernel/time/ntp.c4
-rw-r--r--kernel/time/timekeeping.c3
-rw-r--r--kernel/trace/Kconfig2
-rw-r--r--kernel/trace/trace_kprobe.c2
-rw-r--r--kernel/workqueue.c35
-rw-r--r--lib/Kconfig.debug8
-rw-r--r--lib/Kconfig.ubsan1
-rw-r--r--lib/fonts/font_ter16x32.c6
-rw-r--r--lib/genalloc.c25
-rw-r--r--lib/iov_iter.c2
-rw-r--r--lib/raid6/Makefile2
-rw-r--r--lib/zlib_dfltcc/Makefile2
-rw-r--r--lib/zlib_dfltcc/dfltcc.c6
-rw-r--r--lib/zlib_dfltcc/dfltcc_deflate.c3
-rw-r--r--lib/zlib_dfltcc/dfltcc_inflate.c4
-rw-r--r--lib/zlib_dfltcc/dfltcc_syms.c17
-rw-r--r--mm/highmem.c7
-rw-r--r--mm/hugetlb.c24
-rw-r--r--mm/kasan/generic.c2
-rw-r--r--mm/kasan/hw_tags.c77
-rw-r--r--mm/kasan/init.c26
-rw-r--r--mm/memblock.c2
-rw-r--r--mm/memcontrol.c4
-rw-r--r--mm/memory-failure.c22
-rw-r--r--mm/memory.c8
-rw-r--r--mm/memory_hotplug.c2
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/migrate.c23
-rw-r--r--mm/mremap.c4
-rw-r--r--mm/page-writeback.c2
-rw-r--r--mm/page_alloc.c125
-rw-r--r--mm/process_vm_access.c1
-rw-r--r--mm/slub.c14
-rw-r--r--mm/vmalloc.c4
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/8021q/vlan.c3
-rw-r--r--net/bpf/test_run.c3
-rw-r--r--net/can/isotp.c1
-rw-r--r--net/ceph/auth_x.c57
-rw-r--r--net/ceph/crypto.c3
-rw-r--r--net/ceph/messenger_v1.c2
-rw-r--r--net/ceph/messenger_v2.c60
-rw-r--r--net/ceph/mon_client.c14
-rw-r--r--net/ceph/osd_client.c40
-rw-r--r--net/core/dev.c42
-rw-r--r--net/core/devlink.c4
-rw-r--r--net/core/gen_estimator.c11
-rw-r--r--net/core/neighbour.c6
-rw-r--r--net/core/net-sysfs.c65
-rw-r--r--net/core/rtnetlink.c23
-rw-r--r--net/core/skbuff.c63
-rw-r--r--net/core/sock_reuseport.c2
-rw-r--r--net/dcb/dcbnl.c2
-rw-r--r--net/dsa/dsa2.c4
-rw-r--r--net/dsa/master.c10
-rw-r--r--net/ipv4/esp4.c7
-rw-r--r--net/ipv4/fib_frontend.c2
-rw-r--r--net/ipv4/gre_demux.c2
-rw-r--r--net/ipv4/inet_connection_sock.c1
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/ip_tunnel.c11
-rw-r--r--net/ipv4/netfilter/arp_tables.c2
-rw-r--r--net/ipv4/netfilter/ip_tables.c2
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c2
-rw-r--r--net/ipv4/nexthop.c6
-rw-r--r--net/ipv4/tcp.c1
-rw-r--r--net/ipv4/tcp_input.c6
-rw-r--r--net/ipv4/tcp_ipv4.c29
-rw-r--r--net/ipv4/tcp_output.c1
-rw-r--r--net/ipv4/tcp_timer.c36
-rw-r--r--net/ipv4/udp.c3
-rw-r--r--net/ipv6/addrconf.c3
-rw-r--r--net/ipv6/esp6.c7
-rw-r--r--net/ipv6/ip6_fib.c5
-rw-r--r--net/ipv6/ip6_output.c41
-rw-r--r--net/ipv6/netfilter/ip6_tables.c2
-rw-r--r--net/ipv6/sit.c5
-rw-r--r--net/lapb/lapb_iface.c1
-rw-r--r--net/mac80211/debugfs.c44
-rw-r--r--net/mac80211/rx.c2
-rw-r--r--net/mac80211/tx.c31
-rw-r--r--net/mptcp/protocol.c74
-rw-r--r--net/ncsi/ncsi-rsp.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h42
-rw-r--r--net/netfilter/nf_conntrack_standalone.c3
-rw-r--r--net/netfilter/nf_nat_core.c1
-rw-r--r--net/netfilter/nf_tables_api.c10
-rw-r--r--net/netfilter/nft_dynset.c15
-rw-r--r--net/netfilter/xt_RATEEST.c3
-rw-r--r--net/nfc/nci/core.c2
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/qrtr/ns.c7
-rw-r--r--net/qrtr/qrtr.c16
-rw-r--r--net/qrtr/qrtr.h2
-rw-r--r--net/rxrpc/input.c2
-rw-r--r--net/rxrpc/key.c6
-rw-r--r--net/sched/cls_flower.c22
-rw-r--r--net/sched/cls_tcindex.c8
-rw-r--r--net/sched/sch_api.c3
-rw-r--r--net/sched/sch_choke.c2
-rw-r--r--net/sched/sch_gred.c2
-rw-r--r--net/sched/sch_red.c2
-rw-r--r--net/sched/sch_sfq.c2
-rw-r--r--net/sched/sch_taprio.c7
-rw-r--r--net/smc/smc_core.c20
-rw-r--r--net/smc/smc_ib.c6
-rw-r--r--net/smc/smc_ism.c3
-rw-r--r--net/sunrpc/addr.c2
-rw-r--r--net/sunrpc/svc_xprt.c4
-rw-r--r--net/sunrpc/svcsock.c86
-rw-r--r--net/tipc/link.c11
-rw-r--r--net/tipc/node.c2
-rw-r--r--net/wireless/Kconfig1
-rw-r--r--net/wireless/reg.c11
-rw-r--r--net/xdp/xsk.c20
-rw-r--r--net/xdp/xsk_buff_pool.c3
-rw-r--r--net/xdp/xsk_queue.h5
-rwxr-xr-xscripts/checkpatch.pl6
-rwxr-xr-xscripts/config1
-rwxr-xr-xscripts/depmod.sh2
-rw-r--r--scripts/gcc-plugins/Makefile4
-rw-r--r--scripts/kconfig/Makefile10
-rwxr-xr-xscripts/kconfig/mconf-cfg.sh4
-rw-r--r--security/lsm_audit.c7
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c3
-rw-r--r--sound/firewire/fireface/ff-transaction.c2
-rw-r--r--sound/firewire/tascam/tascam-transaction.c2
-rw-r--r--sound/pci/hda/hda_codec.c24
-rw-r--r--sound/pci/hda/hda_intel.c17
-rw-r--r--sound/pci/hda/hda_tegra.c2
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_hdmi.c3
-rw-r--r--sound/pci/hda/patch_realtek.c24
-rw-r--r--sound/pci/hda/patch_via.c14
-rw-r--r--sound/soc/amd/raven/pci-acp3x.c16
-rw-r--r--sound/soc/amd/renoir/rn-pci-acp3x.c14
-rw-r--r--sound/soc/atmel/Kconfig4
-rw-r--r--sound/soc/codecs/Kconfig2
-rw-r--r--sound/soc/codecs/hdmi-codec.c2
-rw-r--r--sound/soc/codecs/max98373-i2c.c20
-rw-r--r--sound/soc/codecs/max98373-sdw.c20
-rw-r--r--sound/soc/codecs/max98373.c34
-rw-r--r--sound/soc/codecs/max98373.h8
-rw-r--r--sound/soc/codecs/rt711.c6
-rw-r--r--sound/soc/fsl/imx-hdmi.c3
-rw-r--r--sound/soc/intel/boards/haswell.c1
-rw-r--r--sound/soc/intel/skylake/cnl-sst.c1
-rw-r--r--sound/soc/meson/axg-tdm-interface.c14
-rw-r--r--sound/soc/meson/axg-tdmin.c13
-rw-r--r--sound/soc/qcom/lpass-cpu.c20
-rw-r--r--sound/soc/qcom/lpass-platform.c50
-rw-r--r--sound/soc/sh/rcar/adg.c18
-rw-r--r--sound/soc/soc-dapm.c1
-rw-r--r--sound/soc/sof/Kconfig2
-rw-r--r--sound/soc/sof/intel/hda-codec.c37
-rw-r--r--sound/soc/sof/intel/hda-dsp.c9
-rw-r--r--sound/soc/sof/intel/hda.h2
-rw-r--r--sound/usb/card.c5
-rw-r--r--sound/usb/card.h3
-rw-r--r--sound/usb/clock.c21
-rw-r--r--sound/usb/endpoint.c93
-rw-r--r--sound/usb/endpoint.h2
-rw-r--r--sound/usb/implicit.c62
-rw-r--r--sound/usb/midi.c4
-rw-r--r--sound/usb/pcm.c174
-rw-r--r--sound/usb/quirks-table.h6
-rw-r--r--sound/usb/quirks.c86
-rw-r--r--sound/usb/usbaudio.h1
-rwxr-xr-xtools/bootconfig/scripts/bconf2ftrace.sh1
-rwxr-xr-xtools/bootconfig/scripts/ftrace2bconf.sh4
-rw-r--r--tools/bpf/bpftool/net.c1
-rw-r--r--tools/bpf/resolve_btfids/main.c17
-rw-r--r--tools/gpio/gpio-event-mon.c4
-rw-r--r--tools/gpio/gpio-watch.c5
-rw-r--r--tools/include/linux/build_bug.h5
-rw-r--r--tools/include/uapi/linux/kvm.h2
-rw-r--r--tools/lib/bpf/btf.c5
-rw-r--r--tools/lib/perf/evlist.c17
-rw-r--r--tools/lib/perf/tests/test-cpumap.c2
-rw-r--r--tools/lib/perf/tests/test-evlist.c7
-rw-r--r--tools/lib/perf/tests/test-evsel.c2
-rw-r--r--tools/lib/perf/tests/test-threadmap.c2
-rw-r--r--tools/objtool/check.c14
-rw-r--r--tools/objtool/elf.c14
-rw-r--r--tools/perf/builtin-script.c18
-rw-r--r--tools/perf/examples/bpf/5sec.c2
-rwxr-xr-xtools/perf/tests/shell/stat+shadow_stat.sh30
-rw-r--r--tools/perf/util/header.c8
-rw-r--r--tools/perf/util/machine.c4
-rw-r--r--tools/perf/util/metricgroup.c16
-rw-r--r--tools/perf/util/session.c2
-rw-r--r--tools/perf/util/stat-shadow.c366
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c32
-rwxr-xr-xtools/testing/kunit/kunit.py34
-rw-r--r--tools/testing/kunit/kunit_config.py7
-rw-r--r--tools/testing/kunit/kunit_json.py2
-rw-r--r--tools/testing/kunit/kunit_kernel.py56
-rw-r--r--tools/testing/kunit/kunit_parser.py81
-rw-r--r--tools/testing/selftests/Makefile6
-rw-r--r--tools/testing/selftests/arm64/fp/fpsimd-test.S2
-rw-r--r--tools/testing/selftests/arm64/fp/sve-test.S2
-rw-r--r--tools/testing/selftests/bpf/Makefile3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_local_storage.c96
-rw-r--r--tools/testing/selftests/bpf/progs/bprm_opts.c2
-rw-r--r--tools/testing/selftests/bpf/progs/local_storage.c62
-rw-r--r--tools/testing/selftests/bpf/test_maps.c48
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c12
-rw-r--r--tools/testing/selftests/bpf/verifier/spill_fill.c30
-rw-r--r--tools/testing/selftests/bpf/xdpxceiver.c4
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh2
-rw-r--r--tools/testing/selftests/kvm/Makefile2
-rw-r--r--tools/testing/selftests/kvm/demand_paging_test.c118
-rw-r--r--tools/testing/selftests/kvm/dirty_log_perf_test.c145
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c125
-rw-r--r--tools/testing/selftests/kvm/include/guest_modes.h21
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h9
-rw-r--r--tools/testing/selftests/kvm/include/perf_test_util.h167
-rw-r--r--tools/testing/selftests/kvm/lib/guest_modes.c70
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c9
-rw-r--r--tools/testing/selftests/kvm/lib/perf_test_util.c134
-rwxr-xr-xtools/testing/selftests/net/fib_nexthops.sh2
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh1
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh71
-rw-r--r--tools/testing/selftests/net/tls.c4
-rwxr-xr-xtools/testing/selftests/net/udpgro.sh34
-rw-r--r--tools/testing/selftests/netfilter/Makefile3
-rwxr-xr-xtools/testing/selftests/netfilter/ipip-conntrack-mtu.sh206
-rwxr-xr-xtools/testing/selftests/netfilter/nft_conntrack_helper.sh12
-rw-r--r--tools/testing/selftests/powerpc/alignment/alignment_handler.c5
-rw-r--r--tools/testing/selftests/powerpc/mm/pkey_exec_prot.c2
-rw-r--r--tools/testing/selftests/powerpc/mm/pkey_siginfo.c2
-rw-r--r--tools/testing/selftests/vDSO/.gitignore3
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_correctness.c2
-rw-r--r--tools/testing/selftests/vm/Makefile10
-rw-r--r--tools/testing/selftests/wireguard/qemu/debug.config1
-rw-r--r--virt/kvm/kvm_main.c3
2015 files changed, 54291 insertions, 38318 deletions
diff --git a/.mailmap b/.mailmap
index 632700cee55c..b1ab0129c7d6 100644
--- a/.mailmap
+++ b/.mailmap
@@ -55,6 +55,8 @@ Bart Van Assche <[email protected]> <[email protected]>
Ben Gardner <[email protected]>
Ben M Cahill <[email protected]>
Björn Steinbrink <[email protected]>
diff --git a/CREDITS b/CREDITS
index 090ed4b004a5..9add7e6a4fa0 100644
--- a/CREDITS
+++ b/CREDITS
@@ -710,6 +710,10 @@ S: Las Cuevas 2385 - Bo Guemes
S: Las Heras, Mendoza CP 5539
S: Argentina
+N: Jay Cliburn
+D: ATLX Ethernet drivers
+
N: Steven P. Cole
@@ -1284,6 +1288,10 @@ D: Major kbuild rework during the 2.5 cycle
D: ISDN Maintainer
S: USA
+N: Gerrit Renker
+D: DCCP protocol support.
+
N: Philip Gladstone
D: Kernel / timekeeping stuff
@@ -2138,6 +2146,10 @@ E: [email protected]
D: Original author of software suspend
+N: Alexey Kuznetsov
+D: Author and maintainer of large parts of the networking stack
+
N: Jaroslav Kysela
W: https://www.perex.cz
@@ -2696,6 +2708,10 @@ N: Wolfgang Muees
D: Auerswald USB driver
+N: Shrijeet Mukherjee
+D: Network routing domains (VRF).
+
N: Paul Mundt
D: SuperH maintainer
@@ -4110,6 +4126,10 @@ S: B-1206 Jingmao Guojigongyu
S: 16 Baliqiao Nanjie, Beijing 101100
S: People's Republic of China
+N: Aviad Yehezkel
+D: Kernel TLS implementation and offload support.
+
N: Victor Yodaiken
D: RTLinux (RealTime Linux)
@@ -4167,6 +4187,10 @@ S: 1507 145th Place SE #B5
S: Bellevue, Washington 98007
S: USA
+N: Wensong Zhang
+D: IP virtual server (IPVS).
+
N: Haojian Zhuang
D: MMP support
diff --git a/Documentation/ABI/testing/sysfs-class-devlink b/Documentation/ABI/testing/sysfs-class-devlink
index b662f747c83e..8a21ce515f61 100644
--- a/Documentation/ABI/testing/sysfs-class-devlink
+++ b/Documentation/ABI/testing/sysfs-class-devlink
@@ -5,8 +5,8 @@ Description:
Provide a place in sysfs for the device link objects in the
kernel at any given time. The name of a device link directory,
denoted as ... above, is of the form <supplier>--<consumer>
- where <supplier> is the supplier device name and <consumer> is
- the consumer device name.
+ where <supplier> is the supplier bus:device name and <consumer>
+ is the consumer bus:device name.
What: /sys/class/devlink/.../auto_remove_on
Date: May 2020
diff --git a/Documentation/ABI/testing/sysfs-devices-consumer b/Documentation/ABI/testing/sysfs-devices-consumer
index 1f06d74d1c3c..0809fda092e6 100644
--- a/Documentation/ABI/testing/sysfs-devices-consumer
+++ b/Documentation/ABI/testing/sysfs-devices-consumer
@@ -4,5 +4,6 @@ Contact: Saravana Kannan <[email protected]>
Description:
The /sys/devices/.../consumer:<consumer> are symlinks to device
links where this device is the supplier. <consumer> denotes the
- name of the consumer in that device link. There can be zero or
- more of these symlinks for a given device.
+ name of the consumer in that device link and is of the form
+ bus:device name. There can be zero or more of these symlinks
+ for a given device.
diff --git a/Documentation/ABI/testing/sysfs-devices-supplier b/Documentation/ABI/testing/sysfs-devices-supplier
index a919e0db5e90..207f5972e98d 100644
--- a/Documentation/ABI/testing/sysfs-devices-supplier
+++ b/Documentation/ABI/testing/sysfs-devices-supplier
@@ -4,5 +4,6 @@ Contact: Saravana Kannan <[email protected]>
Description:
The /sys/devices/.../supplier:<supplier> are symlinks to device
links where this device is the consumer. <supplier> denotes the
- name of the supplier in that device link. There can be zero or
- more of these symlinks for a given device.
+ name of the supplier in that device link and is of the form
+ bus:device name. There can be zero or more of these symlinks
+ for a given device.
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index adc0d0e91607..75ccc5c62b3c 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -916,21 +916,25 @@ Date: September 2014
Contact: Subhash Jadavani <[email protected]>
Description: This entry could be used to set or show the UFS device
runtime power management level. The current driver
- implementation supports 6 levels with next target states:
+ implementation supports 7 levels with next target states:
== ====================================================
- 0 an UFS device will stay active, an UIC link will
+ 0 UFS device will stay active, UIC link will
stay active
- 1 an UFS device will stay active, an UIC link will
+ 1 UFS device will stay active, UIC link will
hibernate
- 2 an UFS device will moved to sleep, an UIC link will
+ 2 UFS device will be moved to sleep, UIC link will
stay active
- 3 an UFS device will moved to sleep, an UIC link will
+ 3 UFS device will be moved to sleep, UIC link will
hibernate
- 4 an UFS device will be powered off, an UIC link will
+ 4 UFS device will be powered off, UIC link will
hibernate
- 5 an UFS device will be powered off, an UIC link will
+ 5 UFS device will be powered off, UIC link will
be powered off
+ 6 UFS device will be moved to deep sleep, UIC link
+ will be powered off. Note, deep sleep might not be
+ supported in which case this value will not be
+ accepted
== ====================================================
What: /sys/bus/platform/drivers/ufshcd/*/rpm_target_dev_state
@@ -954,21 +958,25 @@ Date: September 2014
Contact: Subhash Jadavani <[email protected]>
Description: This entry could be used to set or show the UFS device
system power management level. The current driver
- implementation supports 6 levels with next target states:
+ implementation supports 7 levels with next target states:
== ====================================================
- 0 an UFS device will stay active, an UIC link will
+ 0 UFS device will stay active, UIC link will
stay active
- 1 an UFS device will stay active, an UIC link will
+ 1 UFS device will stay active, UIC link will
hibernate
- 2 an UFS device will moved to sleep, an UIC link will
+ 2 UFS device will be moved to sleep, UIC link will
stay active
- 3 an UFS device will moved to sleep, an UIC link will
+ 3 UFS device will be moved to sleep, UIC link will
hibernate
- 4 an UFS device will be powered off, an UIC link will
+ 4 UFS device will be powered off, UIC link will
hibernate
- 5 an UFS device will be powered off, an UIC link will
+ 5 UFS device will be powered off, UIC link will
be powered off
+ 6 UFS device will be moved to deep sleep, UIC link
+ will be powered off. Note, deep sleep might not be
+ supported in which case this value will not be
+ accepted
== ====================================================
What: /sys/bus/platform/drivers/ufshcd/*/spm_target_dev_state
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
index 83ae3b79a643..a648b423ba0e 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
@@ -473,7 +473,7 @@ read-side critical sections that follow the idle period (the oval near
the bottom of the diagram above).
Plumbing this into the full grace-period execution is described
-`below <#Forcing%20Quiescent%20States>`__.
+`below <Forcing Quiescent States_>`__.
CPU-Hotplug Interface
^^^^^^^^^^^^^^^^^^^^^
@@ -494,7 +494,7 @@ mask to detect CPUs having gone offline since the beginning of this
grace period.
Plumbing this into the full grace-period execution is described
-`below <#Forcing%20Quiescent%20States>`__.
+`below <Forcing Quiescent States_>`__.
Forcing Quiescent States
^^^^^^^^^^^^^^^^^^^^^^^^
@@ -532,7 +532,7 @@ from other CPUs.
| RCU. But this diagram is complex enough as it is, so simplicity |
| overrode accuracy. You can think of it as poetic license, or you can |
| think of it as misdirection that is resolved in the |
-| `stitched-together diagram <#Putting%20It%20All%20Together>`__. |
+| `stitched-together diagram <Putting It All Together_>`__. |
+-----------------------------------------------------------------------+
Grace-Period Cleanup
@@ -596,7 +596,7 @@ maintain ordering. For example, if the callback function wakes up a task
that runs on some other CPU, proper ordering must be in place in both the
callback function and the task being awakened. To see why this is
important, consider the top half of the `grace-period
-cleanup <#Grace-Period%20Cleanup>`__ diagram. The callback might be
+cleanup`_ diagram. The callback might be
running on a CPU corresponding to the leftmost leaf ``rcu_node``
structure, and awaken a task that is to run on a CPU corresponding to
the rightmost leaf ``rcu_node`` structure, and the grace-period kernel
diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst
index e8c84fcc0507..d4c9a016074b 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.rst
+++ b/Documentation/RCU/Design/Requirements/Requirements.rst
@@ -45,7 +45,7 @@ requirements:
#. `Other RCU Flavors`_
#. `Possible Future Changes`_
-This is followed by a `summary <#Summary>`__, however, the answers to
+This is followed by a summary_, however, the answers to
each quick quiz immediately follows the quiz. Select the big white space
with your mouse to see the answer.
@@ -1096,7 +1096,7 @@ memory barriers.
| case, voluntary context switch) within an RCU read-side critical |
| section. However, sleeping locks may be used within userspace RCU |
| read-side critical sections, and also within Linux-kernel sleepable |
-| RCU `(SRCU) <#Sleepable%20RCU>`__ read-side critical sections. In |
+| RCU `(SRCU) <Sleepable RCU_>`__ read-side critical sections. In |
| addition, the -rt patchset turns spinlocks into sleeping locks so |
| that the corresponding critical sections can be preempted, which also |
| means that these sleeplockified spinlocks (but not other sleeping |
@@ -1186,7 +1186,7 @@ non-preemptible (``CONFIG_PREEMPT=n``) kernels, and thus `tiny
RCU <https://lkml.kernel.org/g/[email protected]>`__
was born. Josh Triplett has since taken over the small-memory banner
with his `Linux kernel tinification <https://tiny.wiki.kernel.org/>`__
-project, which resulted in `SRCU <#Sleepable%20RCU>`__ becoming optional
+project, which resulted in `SRCU <Sleepable RCU_>`__ becoming optional
for those kernels not needing it.
The remaining performance requirements are, for the most part,
@@ -1457,8 +1457,8 @@ will vary as the value of ``HZ`` varies, and can also be changed using
the relevant Kconfig options and kernel boot parameters. RCU currently
does not do much sanity checking of these parameters, so please use
caution when changing them. Note that these forward-progress measures
-are provided only for RCU, not for `SRCU <#Sleepable%20RCU>`__ or `Tasks
-RCU <#Tasks%20RCU>`__.
+are provided only for RCU, not for `SRCU <Sleepable RCU_>`__ or `Tasks
+RCU`_.
RCU takes the following steps in ``call_rcu()`` to encourage timely
invocation of callbacks when any given non-\ ``rcu_nocbs`` CPU has
@@ -1477,8 +1477,8 @@ encouragement was provided:
Again, these are default values when running at ``HZ=1000``, and can be
overridden. Again, these forward-progress measures are provided only for
-RCU, not for `SRCU <#Sleepable%20RCU>`__ or `Tasks
-RCU <#Tasks%20RCU>`__. Even for RCU, callback-invocation forward
+RCU, not for `SRCU <Sleepable RCU_>`__ or `Tasks
+RCU`_. Even for RCU, callback-invocation forward
progress for ``rcu_nocbs`` CPUs is much less well-developed, in part
because workloads benefiting from ``rcu_nocbs`` CPUs tend to invoke
``call_rcu()`` relatively infrequently. If workloads emerge that need
@@ -1920,7 +1920,7 @@ Hotplug CPU
The Linux kernel supports CPU hotplug, which means that CPUs can come
and go. It is of course illegal to use any RCU API member from an
-offline CPU, with the exception of `SRCU <#Sleepable%20RCU>`__ read-side
+offline CPU, with the exception of `SRCU <Sleepable RCU_>`__ read-side
critical sections. This requirement was present from day one in
DYNIX/ptx, but on the other hand, the Linux kernel's CPU-hotplug
implementation is “interesting.”
@@ -2177,7 +2177,7 @@ handles these states differently:
However, RCU must be reliably informed as to whether any given CPU is
currently in the idle loop, and, for ``NO_HZ_FULL``, also whether that
CPU is executing in usermode, as discussed
-`earlier <#Energy%20Efficiency>`__. It also requires that the
+`earlier <Energy Efficiency_>`__. It also requires that the
scheduling-clock interrupt be enabled when RCU needs it to be:
#. If a CPU is either idle or executing in usermode, and RCU believes it
@@ -2294,7 +2294,7 @@ Performance, Scalability, Response Time, and Reliability
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Expanding on the `earlier
-discussion <#Performance%20and%20Scalability>`__, RCU is used heavily by
+discussion <Performance and Scalability_>`__, RCU is used heavily by
hot code paths in performance-critical portions of the Linux kernel's
networking, security, virtualization, and scheduling code paths. RCU
must therefore use efficient implementations, especially in its
diff --git a/Documentation/admin-guide/binfmt-misc.rst b/Documentation/admin-guide/binfmt-misc.rst
index 7a864131e5ea..59cd902e3549 100644
--- a/Documentation/admin-guide/binfmt-misc.rst
+++ b/Documentation/admin-guide/binfmt-misc.rst
@@ -23,7 +23,7 @@ Here is what the fields mean:
- ``name``
is an identifier string. A new /proc file will be created with this
- ``name below /proc/sys/fs/binfmt_misc``; cannot contain slashes ``/`` for
+ name below ``/proc/sys/fs/binfmt_misc``; cannot contain slashes ``/`` for
obvious reasons.
- ``type``
is the type of recognition. Give ``M`` for magic and ``E`` for extension.
@@ -83,7 +83,7 @@ Here is what the fields mean:
``F`` - fix binary
The usual behaviour of binfmt_misc is to spawn the
binary lazily when the misc format file is invoked. However,
- this doesn``t work very well in the face of mount namespaces and
+ this doesn't work very well in the face of mount namespaces and
changeroots, so the ``F`` mode opens the binary as soon as the
emulation is installed and uses the opened image to spawn the
emulator, meaning it is always available once installed,
diff --git a/Documentation/admin-guide/bootconfig.rst b/Documentation/admin-guide/bootconfig.rst
index 9b90efcc3a35..452b7dcd7f6b 100644
--- a/Documentation/admin-guide/bootconfig.rst
+++ b/Documentation/admin-guide/bootconfig.rst
@@ -154,7 +154,7 @@ get the boot configuration data.
Because of this "piggyback" method, there is no need to change or
update the boot loader and the kernel image itself as long as the boot
loader passes the correct initrd file size. If by any chance, the boot
-loader passes a longer size, the kernel feils to find the bootconfig data.
+loader passes a longer size, the kernel fails to find the bootconfig data.
To do this operation, Linux kernel provides "bootconfig" command under
tools/bootconfig, which allows admin to apply or delete the config file
diff --git a/Documentation/admin-guide/device-mapper/dm-integrity.rst b/Documentation/admin-guide/device-mapper/dm-integrity.rst
index 4e6f504474ac..2cc5488acbd9 100644
--- a/Documentation/admin-guide/device-mapper/dm-integrity.rst
+++ b/Documentation/admin-guide/device-mapper/dm-integrity.rst
@@ -177,14 +177,20 @@ bitmap_flush_interval:number
The bitmap flush interval in milliseconds. The metadata buffers
are synchronized when this interval expires.
+allow_discards
+ Allow block discard requests (a.k.a. TRIM) for the integrity device.
+ Discards are only allowed to devices using internal hash.
+
fix_padding
Use a smaller padding of the tag area that is more
space-efficient. If this option is not present, large padding is
used - that is for compatibility with older kernels.
-allow_discards
- Allow block discard requests (a.k.a. TRIM) for the integrity device.
- Discards are only allowed to devices using internal hash.
+legacy_recalculate
+ Allow recalculating of volumes with HMAC keys. This is disabled by
+ default for security reasons - an attacker could modify the volume,
+ set recalc_sector to zero, and the kernel would not detect the
+ modification.
The journal mode (D/J), buffer_sectors, journal_watermark, commit_time and
allow_discards can be changed when reloading the target (load an inactive
diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst
index 06fb1b4aa849..682ab28b5c94 100644
--- a/Documentation/admin-guide/kernel-parameters.rst
+++ b/Documentation/admin-guide/kernel-parameters.rst
@@ -3,8 +3,8 @@
The kernel's command-line parameters
====================================
-The following is a consolidated list of the kernel parameters as
-implemented by the __setup(), core_param() and module_param() macros
+The following is a consolidated list of the kernel parameters as implemented
+by the __setup(), early_param(), core_param() and module_param() macros
and sorted into English Dictionary order (defined as ignoring all
punctuation and sorting digits before letters in a case insensitive
manner), and with descriptions where known.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index c722ec19cd00..a10b545c2070 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1385,7 +1385,7 @@
ftrace_filter=[function-list]
[FTRACE] Limit the functions traced by the function
- tracer at boot up. function-list is a comma separated
+ tracer at boot up. function-list is a comma-separated
list of functions. This list can be changed at run
time by the set_ftrace_filter file in the debugfs
tracing directory.
@@ -1399,13 +1399,13 @@
ftrace_graph_filter=[function-list]
[FTRACE] Limit the top level callers functions traced
by the function graph tracer at boot up.
- function-list is a comma separated list of functions
+ function-list is a comma-separated list of functions
that can be changed at run time by the
set_graph_function file in the debugfs tracing directory.
ftrace_graph_notrace=[function-list]
[FTRACE] Do not trace from the functions specified in
- function-list. This list is a comma separated list of
+ function-list. This list is a comma-separated list of
functions that can be changed at run time by the
set_graph_notrace file in the debugfs tracing directory.
@@ -2421,7 +2421,7 @@
when set.
Format: <int>
- libata.force= [LIBATA] Force configurations. The format is comma
+	libata.force=	[LIBATA] Force configurations. The format is a comma-
separated list of "[ID:]VAL" where ID is
PORT[.DEVICE]. PORT and DEVICE are decimal numbers
matching port, link or device. Basically, it matches
@@ -5145,7 +5145,7 @@
stacktrace_filter=[function-list]
[FTRACE] Limit the functions that the stack tracer
- will trace at boot up. function-list is a comma separated
+ will trace at boot up. function-list is a comma-separated
list of functions. This list can be changed at run
time by the stack_trace_filter file in the debugfs
tracing directory. Note, this enables stack tracing
@@ -5348,7 +5348,7 @@
trace_event=[event-list]
[FTRACE] Set and start specified trace events in order
to facilitate early boot debugging. The event-list is a
- comma separated list of trace events to enable. See
+ comma-separated list of trace events to enable. See
also Documentation/trace/events.rst
trace_options=[option-list]
@@ -5972,6 +5972,10 @@
This option is obsoleted by the "nopv" option, which
has equivalent effect for XEN platform.
+ xen_no_vector_callback
+ [KNL,X86,XEN] Disable the vector callback for Xen
+ event channel interrupts.
+
xen_scrub_pages= [XEN]
Boolean option to control scrubbing pages before giving them back
to Xen, for use by other domains. Can be also changed at runtime
diff --git a/Documentation/admin-guide/mm/concepts.rst b/Documentation/admin-guide/mm/concepts.rst
index fa0974fbeae7..b966fcff993b 100644
--- a/Documentation/admin-guide/mm/concepts.rst
+++ b/Documentation/admin-guide/mm/concepts.rst
@@ -184,7 +184,7 @@ pages either asynchronously or synchronously, depending on the state
of the system. When the system is not loaded, most of the memory is free
and allocation requests will be satisfied immediately from the free
pages supply. As the load increases, the amount of the free pages goes
-down and when it reaches a certain threshold (high watermark), an
+down and when it reaches a certain threshold (low watermark), an
allocation request will awaken the ``kswapd`` daemon. It will
asynchronously scan memory pages and either just free them if the data
they contain is available elsewhere, or evict to the backing storage
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index 69171b1799f2..f1c9d20bd42d 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -53,7 +53,6 @@ How Linux keeps everything from happening at the same time. See
.. toctree::
:maxdepth: 1
- atomic_ops
refcount-vs-atomic
irq/index
local_ops
diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
index 0fc3fb1860c4..1651d961f06a 100644
--- a/Documentation/dev-tools/kasan.rst
+++ b/Documentation/dev-tools/kasan.rst
@@ -160,29 +160,14 @@ intended for use in production as a security mitigation. Therefore it supports
boot parameters that allow disabling KASAN completely or otherwise controlling
particular KASAN features.
-The things that can be controlled are:
+- ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
-1. Whether KASAN is enabled at all.
-2. Whether KASAN collects and saves alloc/free stacks.
-3. Whether KASAN panics on a detected bug or not.
+- ``kasan.stacktrace=off`` or ``=on`` disables or enables collection of alloc and
+  free stack traces (default: ``on`` for ``CONFIG_DEBUG_KERNEL=y``, otherwise
+  ``off``).
-The ``kasan.mode`` boot parameter allows to choose one of three main modes:
-
-- ``kasan.mode=off`` - KASAN is disabled, no tag checks are performed
-- ``kasan.mode=prod`` - only essential production features are enabled
-- ``kasan.mode=full`` - all KASAN features are enabled
-
-The chosen mode provides default control values for the features mentioned
-above. However it's also possible to override the default values by providing:
-
-- ``kasan.stacktrace=off`` or ``=on`` - enable alloc/free stack collection
- (default: ``on`` for ``mode=full``,
- otherwise ``off``)
-- ``kasan.fault=report`` or ``=panic`` - only print KASAN report or also panic
- (default: ``report``)
-
-If ``kasan.mode`` parameter is not provided, it defaults to ``full`` when
-``CONFIG_DEBUG_KERNEL`` is enabled, and to ``prod`` otherwise.
+- ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN
+ report or also panic the kernel (default: ``report``).
For developers
~~~~~~~~~~~~~~
diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst
index d9fdc14f0677..650f99590df5 100644
--- a/Documentation/dev-tools/kunit/usage.rst
+++ b/Documentation/dev-tools/kunit/usage.rst
@@ -522,6 +522,63 @@ There's more boilerplate involved, but it can:
* E.g. if we wanted to also test ``sha256sum``, we could add a ``sha256``
field and reuse ``cases``.
+* be converted to a "parameterized test", see below.
+
+Parameterized Testing
+~~~~~~~~~~~~~~~~~~~~~
+
+The table-driven testing pattern is common enough that KUnit has special
+support for it.
+
+Reusing the same ``cases`` array from above, we can write the test as a
+"parameterized test" with the following.
+
+.. code-block:: c
+
+ // This is copy-pasted from above.
+ struct sha1_test_case {
+ const char *str;
+ const char *sha1;
+ };
+ struct sha1_test_case cases[] = {
+ {
+ .str = "hello world",
+ .sha1 = "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed",
+ },
+ {
+ .str = "hello world!",
+ .sha1 = "430ce34d020724ed75a196dfc2ad67c77772d169",
+ },
+ };
+
+ // Need a helper function to generate a name for each test case.
+ static void case_to_desc(const struct sha1_test_case *t, char *desc)
+ {
+ strcpy(desc, t->str);
+ }
+ // Creates `sha1_gen_params()` to iterate over `cases`.
+ KUNIT_ARRAY_PARAM(sha1, cases, case_to_desc);
+
+ // Looks no different from a normal test.
+ static void sha1_test(struct kunit *test)
+ {
+ // This function can just contain the body of the for-loop.
+ // The former `cases[i]` is accessible under test->param_value.
+ char out[40];
+ struct sha1_test_case *test_param = (struct sha1_test_case *)(test->param_value);
+
+ sha1sum(test_param->str, out);
+ KUNIT_EXPECT_STREQ_MSG(test, (char *)out, test_param->sha1,
+ "sha1sum(%s)", test_param->str);
+ }
+
+ // Instead of KUNIT_CASE, we use KUNIT_CASE_PARAM and pass in the
+ // function declared by KUNIT_ARRAY_PARAM.
+ static struct kunit_case sha1_test_cases[] = {
+ KUNIT_CASE_PARAM(sha1_test, sha1_gen_params),
+ {}
+ };
+
.. _kunit-on-non-uml:
KUnit on non-UML architectures
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml
index 86057d541065..12a7df0e38b2 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml
@@ -84,36 +84,23 @@ properties:
const: dma-mem
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Input endpoints of the controller.
port@1:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Output endpoints of the controller.
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
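For reference, the ``ports`` node that the ``graph.yaml`` reference above
describes follows the standard OF graph layout. A minimal sketch (labels and
remote-endpoint phandles are placeholders, not taken from the binding):

.. code-block:: none

   ports {
       #address-cells = <1>;
       #size-cells = <0>;

       port@0 {
           reg = <0>;

           backend_in: endpoint {
               remote-endpoint = <&frontend_out>;
           };
       };

       port@1 {
           reg = <1>;

           backend_out: endpoint {
               remote-endpoint = <&tcon_in>;
           };
       };
   };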
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml
index 3eb1c2bbf4e7..055157fbf3bf 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml
@@ -57,35 +57,22 @@ properties:
maxItems: 1
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoints of the controller.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoints of the controller.
required:
- - "#address-cells"
- - "#size-cells"
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml
index 75e6479397a5..7f11452539f4 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml
@@ -76,37 +76,24 @@ properties:
- const: audio-tx
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoints of the controller.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoints of the controller. Usually an HDMI
connector.
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
index 4c15a2644a7c..c13faf3e6581 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
@@ -115,31 +115,24 @@ properties:
- const: lvds
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoints of the controller.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: |
Output endpoints of the controller.
patternProperties:
"^endpoint(@[0-9])$":
- type: object
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
properties:
allwinner,tcon-channel:
@@ -156,16 +149,10 @@ properties:
property is not present, the endpoint number will be
used as the channel number.
- unevaluatedProperties: true
-
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
index 6009324be967..afc0ed799e0e 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
@@ -24,11 +24,9 @@ properties:
maxItems: 1
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
- A port node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt. The
- first port should be the input endpoint, usually coming from the
+ The first port should be the input endpoint, usually coming from the
associated TCON.
required:
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml
index 0c1ce55940e1..71cce5687580 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml
@@ -46,36 +46,23 @@ properties:
maxItems: 1
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoints of the controller.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoints of the controller.
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
index 7aa330dabc44..a738d7c12a97 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
@@ -47,11 +47,9 @@ properties:
const: dphy
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
- A port node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt. That
- port should be the input endpoint, usually coming from the
+ The port should be the input endpoint, usually coming from the
associated TCON.
required:
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml
index c040eef56518..4f91eec26de9 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml
@@ -43,35 +43,22 @@ properties:
maxItems: 1
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoints of the controller.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoints of the controller.
required:
- - "#address-cells"
- - "#size-cells"
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
index fa4769a0b26e..b3e9992525c2 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
@@ -93,38 +93,25 @@ properties:
The VCC power supply of the controller
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoints of the controller. Usually the associated
TCON.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoints of the controller. Usually an HDMI
connector.
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml
index b98ca609824b..ec21e8bf2767 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml
@@ -80,141 +80,45 @@ properties:
maxItems: 1
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
- All ports should have only one endpoint connected to
- remote endpoint.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoint for Mixer 0 mux.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoint for Mixer 0 mux
- properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
- reg: true
-
- patternProperties:
- "^endpoint@[0-9]$":
- type: object
-
- properties:
- reg:
- description: |
- ID of the target TCON
-
- required:
- - reg
-
- required:
- - "#address-cells"
- - "#size-cells"
-
- additionalProperties: false
-
port@2:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoint for Mixer 1 mux.
port@3:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoint for Mixer 1 mux
- properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
- reg: true
-
- patternProperties:
- "^endpoint@[0-9]$":
- type: object
-
- properties:
- reg:
- description: |
- ID of the target TCON
-
- required:
- - reg
-
- required:
- - "#address-cells"
- - "#size-cells"
-
- additionalProperties: false
-
port@4:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoint for HDMI mux.
- properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
- reg: true
-
- patternProperties:
- "^endpoint@[0-9]$":
- type: object
-
- properties:
- reg:
- description: |
- ID of the target TCON
-
- required:
- - reg
-
- required:
- - "#address-cells"
- - "#size-cells"
-
- additionalProperties: false
-
port@5:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoint for HDMI mux
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- port@4
- port@5
- additionalProperties: false
-
required:
- "#clock-cells"
- compatible
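The per-endpoint constraints removed above (each mux output port carrying
``endpoint@N`` children whose ``reg`` identifies the target TCON) are now
covered by the graph schema. A hedged sketch of the Mixer 0 output mux;
labels and remote-endpoint phandles are placeholders:

.. code-block:: none

   port@1 {
       #address-cells = <1>;
       #size-cells = <0>;
       reg = <1>;

       /* the endpoint reg selects the target TCON */
       mixer0_out_tcon0: endpoint@0 {
           reg = <0>;
           remote-endpoint = <&tcon0_in>;
       };

       mixer0_out_tcon1: endpoint@1 {
           reg = <1>;
           remote-endpoint = <&tcon1_in>;
       };
   };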
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml b/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml
index 96de41d32b3e..637372ec4614 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml
@@ -40,36 +40,23 @@ properties:
maxItems: 1
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoints of the controller.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoints of the controller.
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
index 0da42ab8fd3a..cf5a208f2f10 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
@@ -81,12 +81,12 @@ properties:
description: phandle to an external 5V regulator to power the HDMI logic
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
A port node pointing to the VENC Input port node.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
A port node pointing to the TMDS Output port node.
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
index a8d202c9d004..851cb0781217 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
@@ -83,12 +83,12 @@ properties:
description: phandle to the associated power domain
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
A port node pointing to the CVBS VDAC port node.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
A port node pointing to the HDMI-TX port node.
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2835-dpi.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2835-dpi.yaml
index 5c1024bbc1b3..c9ad0ecc9b6d 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm2835-dpi.yaml
+++ b/Documentation/devicetree/bindings/display/brcm,bcm2835-dpi.yaml
@@ -27,10 +27,9 @@ properties:
- const: pixel
port:
- type: object
- description: >
- Port node with a single endpoint connecting to the panel, as
- defined in Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Port node with a single endpoint connecting to the panel.
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2835-dsi0.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2835-dsi0.yaml
index eb44e072b6e5..55c60919991f 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm2835-dsi0.yaml
+++ b/Documentation/devicetree/bindings/display/brcm,bcm2835-dsi0.yaml
@@ -18,6 +18,7 @@ properties:
compatible:
enum:
+ - brcm,bcm2711-dsi1
- brcm,bcm2835-dsi0
- brcm,bcm2835-dsi1
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
index 9392b5502a32..c789784efe30 100644
--- a/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
@@ -35,16 +35,16 @@ properties:
maxItems: 1
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Video port for MIPI DSI input.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Video port for panel or connector.
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
index 3ba477aefdd7..8e13f27b28ed 100644
--- a/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
@@ -42,31 +42,18 @@ properties:
description: Regulator for 1.0V digital core power.
ports:
- type: object
- description:
- A node containing input and output port nodes with endpoint
- definitions as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
- Documentation/devicetree/bindings/graph.txt
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Video port for HDMI input.
- properties:
- reg:
- const: 0
-
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Video port for SlimPort, DisplayPort, eDP or MyDP output.
- properties:
- reg:
- const: 1
-
required:
- port@0
- port@1
diff --git a/Documentation/devicetree/bindings/display/bridge/anx6345.yaml b/Documentation/devicetree/bindings/display/bridge/anx6345.yaml
index fccd63521a8c..1c0406c38fe5 100644
--- a/Documentation/devicetree/bindings/display/bridge/anx6345.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/anx6345.yaml
@@ -32,31 +32,23 @@ properties:
description: Regulator for 2.5V digital core power.
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
port@0:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Video port for LVTTL input
port@1:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Video port for eDP output (panel or connector).
May be omitted if EDID works reliably.
required:
- port@0
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml b/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
index 74d675fc6e7b..63427878715e 100644
--- a/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
@@ -57,47 +57,37 @@ properties:
maxItems: 1
ports:
- type: object
- description:
- Ports as described in Documentation/devicetree/bindings/graph.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
First input port representing the DP bridge input.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Second input port representing the DP bridge input.
port@2:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Third input port representing the DP bridge input.
port@3:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Fourth input port representing the DP bridge input.
port@4:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Output port representing the DP bridge output.
required:
- port@0
- port@4
- - '#address-cells'
- - '#size-cells'
allOf:
- if:
diff --git a/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml b/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml
index 9f38f55fc990..bb6289c7d375 100644
--- a/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml
@@ -19,16 +19,16 @@ properties:
description: I2C address of the device
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Video port for RGB input.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
DVI port, should be connected to a node compatible with the
dvi-connector binding.
diff --git a/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml
index 35c9dfd86650..dcb1336ee2a5 100644
--- a/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml
@@ -35,29 +35,21 @@ properties:
- const: clk_mipi_cfg
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: MIPI DSI input port.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: DSI output port.
required:
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
index 02cfc0a3b550..833d11b2303a 100644
--- a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
@@ -53,7 +53,7 @@ properties:
description: extcon specifier for the Power Delivery
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: A port node pointing to DPI host port node
required:
diff --git a/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml b/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml
index 7a1c89b995e2..5b9d36f7af30 100644
--- a/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml
@@ -38,82 +38,26 @@ properties:
description: Regulator for 3.3V IO power.
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Primary MIPI port-1 for MIPI input
- properties:
- reg:
- const: 0
-
- patternProperties:
- "^endpoint(@[0-9])$":
- type: object
- additionalProperties: false
-
- properties:
- remote-endpoint:
- $ref: /schemas/types.yaml#/definitions/phandle
-
- required:
- - reg
-
port@1:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Additional MIPI port-2 for MIPI input, used in combination
with primary MIPI port-1 to drive higher resolution displays
- properties:
- reg:
- const: 1
-
- patternProperties:
- "^endpoint(@[0-9])$":
- type: object
- additionalProperties: false
-
- properties:
- remote-endpoint:
- $ref: /schemas/types.yaml#/definitions/phandle
-
- required:
- - reg
-
port@2:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
HDMI port for HDMI output
- properties:
- reg:
- const: 2
-
- patternProperties:
- "^endpoint(@[0-9])$":
- type: object
- additionalProperties: false
-
- properties:
- remote-endpoint:
- $ref: /schemas/types.yaml#/definitions/phandle
-
- required:
- - reg
-
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@2
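Only ``port@0`` (primary MIPI input) and ``port@2`` (HDMI output) remain
required. A minimal sketch of that layout, with placeholder labels and
remote-endpoint phandles:

.. code-block:: none

   ports {
       #address-cells = <1>;
       #size-cells = <0>;

       port@0 {
           reg = <0>;

           lt9611_in: endpoint {
               remote-endpoint = <&dsi0_out>;
           };
       };

       port@2 {
           reg = <2>;

           lt9611_out: endpoint {
               remote-endpoint = <&hdmi_con_in>;
           };
       };
   };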
diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
index 66a14d60ce1d..304a1367faaa 100644
--- a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
@@ -45,25 +45,17 @@ properties:
- thine,thc63lvdm83d # For the THC63LVDM83D LVDS serializer
ports:
- type: object
- description: |
- This device has two video ports. Their connections are modeled using the
- OF graph bindings specified in Documentation/devicetree/bindings/graph.txt
- properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
For LVDS encoders, port 0 is the parallel input
For LVDS decoders, port 0 is the LVDS input
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
For LVDS encoders, port 1 is the LVDS output
For LVDS decoders, port 1 is the parallel output
@@ -72,8 +64,6 @@ properties:
- port@0
- port@1
- additionalProperties: false
-
powerdown-gpios:
description:
The GPIO used to control the power down line of this device.
diff --git a/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
index a125b2dd3a2f..350fb8f400f0 100644
--- a/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
@@ -84,40 +84,23 @@ properties:
- const: pclk
ports:
- type: object
- description:
- A node containing DSI input & output port nodes with endpoint
- definitions as documented in
- Documentation/devicetree/bindings/graph.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
+
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
description:
Input port node to receive pixel data from the
display controller. Exactly one endpoint must be
specified.
properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
endpoint@0:
+ $ref: /schemas/graph.yaml#/properties/endpoint
description: sub-node describing the input from LCDIF
- type: object
endpoint@1:
+ $ref: /schemas/graph.yaml#/properties/endpoint
description: sub-node describing the input from DCSS
- type: object
-
- reg:
- const: 0
-
- required:
- - '#address-cells'
- - '#size-cells'
- - reg
oneOf:
- required:
@@ -125,28 +108,18 @@ properties:
- required:
- endpoint@1
- additionalProperties: false
+ unevaluatedProperties: false
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
DSI output port node to the panel or the next bridge
in the chain
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
required:
- - '#address-cells'
- - '#size-cells'
- port@0
- port@1
- additionalProperties: false
-
required:
- '#address-cells'
- '#size-cells'
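``port@0`` keeps its two named endpoints but is now based on ``port-base``,
and exactly one of them must be wired up. A minimal sketch of the LCDIF case;
labels and the remote phandle are placeholders:

.. code-block:: none

   port@0 {
       #address-cells = <1>;
       #size-cells = <0>;
       reg = <0>;

       /* either endpoint@0 (LCDIF) or endpoint@1 (DCSS), not both */
       dsi_in_lcdif: endpoint@0 {
           reg = <0>;
           remote-endpoint = <&lcdif_out_dsi>;
       };
   };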
diff --git a/Documentation/devicetree/bindings/display/bridge/ps8640.yaml b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
index 763c7909473e..fce82b605c8b 100644
--- a/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
@@ -41,34 +41,22 @@ properties:
description: Regulator for 3.3V digital core power.
ports:
- type: object
- description:
- A node containing DSI input & output port nodes with endpoint
- definitions as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
- Documentation/devicetree/bindings/graph.txt
- properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
port@0:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Video port for DSI input
port@1:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Video port for eDP output (panel or connector).
required:
- port@0
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
index e5b163951b91..acfc327f70a7 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
@@ -49,33 +49,21 @@ properties:
maxItems: 1
ports:
- type: object
- description: |
- This device has two video ports. Their connections are modelled using the
- OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
- Each port shall have a single endpoint.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Parallel RGB input port
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: LVDS output port
required:
- port@0
- port@1
- additionalProperties: false
-
power-domains:
maxItems: 1
@@ -83,9 +71,9 @@ properties:
$ref: /schemas/types.yaml#/definitions/phandle
description:
phandle to the companion LVDS encoder. This property is mandatory
- for the first LVDS encoder on D3 and E3 SoCs, and shall point to
- the second encoder to be used as a companion in dual-link mode. It
- shall not be set for any other LVDS encoder.
+ for the first LVDS encoder on R-Car D3 and E3, and RZ/G2E SoCs, and shall
+ point to the second encoder to be used as a companion in dual-link mode.
+ It shall not be set for any other LVDS encoder.
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml b/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml
index 64e8a1c24b40..6c7b577fd471 100644
--- a/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml
@@ -30,31 +30,21 @@ properties:
- ti,ths8135
ports:
- type: object
- description: |
- This device has two video ports. Their connections are modeled using the
- OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
- properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: The bridge input
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: The bridge output
required:
- port@0
- port@1
- additionalProperties: false
-
enable-gpios:
maxItems: 1
description: GPIO controlling bridge enable
diff --git a/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml
index e42cb610f545..3c3e51af154b 100644
--- a/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml
@@ -47,14 +47,15 @@ properties:
const: apb
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Input node to receive pixel data.
+
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: DSI output node to panel.
required:
diff --git a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml
index 3d5ce08a5792..8ae382429d2b 100644
--- a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml
@@ -25,46 +25,41 @@ properties:
const: thine,thc63lvd1024
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
description: |
- This device has four video ports. Their connections are modeled using the
- OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+ The device can operate in single or dual input and output modes.
- The device can operate in single-link mode or dual-link mode. In
- single-link mode, all pixels are received on port@0, and port@1 shall not
- contain any endpoint. In dual-link mode, even-numbered pixels are
- received on port@0 and odd-numbered pixels on port@1, and both port@0 and
- port@1 shall contain endpoints.
+ When operating in single input mode, all pixels are received on port@0,
+ and port@1 shall not contain any endpoint. In dual input mode,
+ even-numbered pixels are received on port@0 and odd-numbered pixels on
+ port@1, and both port@0 and port@1 shall contain endpoints.
- properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
+ When operating in single output mode all pixels are output from the first
+ CMOS/TTL port and port@3 shall not contain any endpoint. In dual output
+ mode pixels are output from both CMOS/TTL ports and both port@2 and
+ port@3 shall contain endpoints.
+ properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: First LVDS input port
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Second LVDS input port
port@2:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: First digital CMOS/TTL parallel output
port@3:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Second digital CMOS/TTL parallel output
required:
- port@0
- port@2
- additionalProperties: false
-
oe-gpios:
maxItems: 1
description: Output enable GPIO signal, pin name "OE", active high.
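Following the reworded description, a minimal sketch of dual input (even
pixels on ``port@0``, odd pixels on ``port@1``) with a single CMOS/TTL output
on ``port@2``; labels and remote-endpoint phandles are placeholders:

.. code-block:: none

   ports {
       #address-cells = <1>;
       #size-cells = <0>;

       port@0 {
           reg = <0>;
           lvds_in_even: endpoint {
               remote-endpoint = <&lvds0_out>;
           };
       };

       port@1 {
           reg = <1>;
           lvds_in_odd: endpoint {
               remote-endpoint = <&lvds1_out>;
           };
       };

       port@2 {
           reg = <2>;
           rgb_out: endpoint {
               remote-endpoint = <&panel_in>;
           };
       };
   };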
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
index f8622bd0f61e..26932d2e86ab 100644
--- a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
@@ -71,54 +71,26 @@ properties:
description: See ../../pwm/pwm.yaml for description of the cell formats.
ports:
- type: object
- additionalProperties: false
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
- additionalProperties: false
-
+ $ref: /schemas/graph.yaml#/properties/port
description:
Video port for MIPI DSI input
- properties:
- reg:
- const: 0
-
- endpoint:
- type: object
- additionalProperties: false
- properties:
- remote-endpoint: true
-
- required:
- - reg
-
port@1:
- type: object
- additionalProperties: false
-
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description:
Video port for eDP output (panel or connector).
properties:
- reg:
- const: 1
-
endpoint:
- type: object
- additionalProperties: false
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
properties:
- remote-endpoint: true
-
data-lanes:
oneOf:
- minItems: 1
@@ -171,12 +143,7 @@ properties:
dependencies:
lane-polarities: [data-lanes]
- required:
- - reg
-
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
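With ``port@1`` now based on ``port-base``, the eDP endpoint still carries
``data-lanes`` (and optionally ``lane-polarities``). A minimal sketch; the
labels, remote phandle and lane values are only illustrative:

.. code-block:: none

   port@1 {
       reg = <1>;

       sn65dsi86_out: endpoint {
           remote-endpoint = <&edp_panel_in>;
           data-lanes = <0 1>;
       };
   };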
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml b/Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml
index 605831c1e836..4c5dd8ec2951 100644
--- a/Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml
@@ -31,23 +31,18 @@ properties:
maximum: 7
ports:
- description:
- A node containing input and output port nodes with endpoint
- definitions as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: DPI input port.
- type: object
properties:
- reg:
- const: 0
-
endpoint:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
properties:
pclk-sample:
@@ -67,15 +62,8 @@ properties:
default: 24
port@1:
+ $ref: /schemas/graph.yaml#/properties/port
description: DVI output port.
- type: object
-
- properties:
- reg:
- const: 1
-
- endpoint:
- type: object
required:
- port@0
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml
index 195025e6803c..5216c27fc0ad 100644
--- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml
@@ -25,62 +25,20 @@ properties:
description: Regulator for 1.2V internal core power.
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
- additionalProperties: false
-
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Video port for MIPI DSI input
- properties:
- reg:
- const: 0
-
- patternProperties:
- endpoint:
- type: object
- additionalProperties: false
-
- properties:
- remote-endpoint: true
-
- required:
- - reg
-
port@1:
- type: object
- additionalProperties: false
-
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Video port for MIPI DPI output (panel or connector).
- properties:
- reg:
- const: 1
-
- patternProperties:
- endpoint:
- type: object
- additionalProperties: false
-
- properties:
- remote-endpoint: true
-
- required:
- - reg
-
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
index c036a75db8f7..eacfe7165083 100644
--- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
@@ -42,65 +42,30 @@ properties:
const: refclk
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
- additionalProperties: false
-
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: |
Video port for RGB input
properties:
- reg:
- const: 0
-
- patternProperties:
endpoint:
- type: object
- additionalProperties: false
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
properties:
data-lines:
enum: [ 16, 18, 24 ]
- remote-endpoint: true
-
- required:
- - reg
-
port@1:
- type: object
- additionalProperties: false
-
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Video port for DSI output (panel or connector).
- properties:
- reg:
- const: 1
-
- patternProperties:
- endpoint:
- type: object
- additionalProperties: false
-
- properties:
- remote-endpoint: true
-
- required:
- - reg
-
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
@@ -156,4 +121,3 @@ examples:
};
};
};
-
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml
index b5959cc78b8d..10471c6c1ff9 100644
--- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml
@@ -42,31 +42,22 @@ properties:
description: Hardware reset, Low active
ports:
- type: object
- description:
- A node containing input and output port nodes with endpoint definitions
- as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
- properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
DSI Input. The remote endpoint phandle should be a
reference to a valid mipi_dsi_host device node.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Video port for LVDS output (panel or connector).
port@2:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Video port for Dual link LVDS output (panel or connector).
diff --git a/Documentation/devicetree/bindings/display/connector/analog-tv-connector.yaml b/Documentation/devicetree/bindings/display/connector/analog-tv-connector.yaml
index eebe88fed999..a31ca2d52b86 100644
--- a/Documentation/devicetree/bindings/display/connector/analog-tv-connector.yaml
+++ b/Documentation/devicetree/bindings/display/connector/analog-tv-connector.yaml
@@ -25,6 +25,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32
port:
+ $ref: /schemas/graph.yaml#/properties/port
description: Connection to controller providing analog TV signals
required:
diff --git a/Documentation/devicetree/bindings/display/connector/dp-connector.yaml b/Documentation/devicetree/bindings/display/connector/dp-connector.yaml
new file mode 100644
index 000000000000..1c17d60e7760
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/connector/dp-connector.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/connector/dp-connector.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: DisplayPort Connector
+
+maintainers:
+ - Tomi Valkeinen <[email protected]>
+
+properties:
+ compatible:
+ const: dp-connector
+
+ label: true
+
+ type:
+ enum:
+ - full-size
+ - mini
+
+ hpd-gpios:
+ description: A GPIO line connected to HPD
+ maxItems: 1
+
+ dp-pwr-supply:
+ description: Power supply for the DP_PWR pin
+ maxItems: 1
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Connection to controller providing DP signals
+
+required:
+ - compatible
+ - type
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ connector {
+ compatible = "dp-connector";
+ label = "dp0";
+ type = "full-size";
+
+ port {
+ dp_connector_in: endpoint {
+ remote-endpoint = <&dp_out>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/connector/dvi-connector.yaml b/Documentation/devicetree/bindings/display/connector/dvi-connector.yaml
index 71cb9220fa59..93eb14294e68 100644
--- a/Documentation/devicetree/bindings/display/connector/dvi-connector.yaml
+++ b/Documentation/devicetree/bindings/display/connector/dvi-connector.yaml
@@ -36,6 +36,7 @@ properties:
description: the connector has pins for DVI dual-link
port:
+ $ref: /schemas/graph.yaml#/properties/port
description: Connection to controller providing DVI signals
required:
diff --git a/Documentation/devicetree/bindings/display/connector/hdmi-connector.yaml b/Documentation/devicetree/bindings/display/connector/hdmi-connector.yaml
index 14d7128af592..83c0d008265b 100644
--- a/Documentation/devicetree/bindings/display/connector/hdmi-connector.yaml
+++ b/Documentation/devicetree/bindings/display/connector/hdmi-connector.yaml
@@ -37,6 +37,7 @@ properties:
maxItems: 1
port:
+ $ref: /schemas/graph.yaml#/properties/port
description: Connection to controller providing HDMI signals
required:
diff --git a/Documentation/devicetree/bindings/display/connector/vga-connector.yaml b/Documentation/devicetree/bindings/display/connector/vga-connector.yaml
index 5782c4bb3252..25f868002000 100644
--- a/Documentation/devicetree/bindings/display/connector/vga-connector.yaml
+++ b/Documentation/devicetree/bindings/display/connector/vga-connector.yaml
@@ -20,6 +20,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/phandle
port:
+ $ref: /schemas/graph.yaml#/properties/port
description: Connection to controller providing VGA signals
required:
diff --git a/Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml b/Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
index f1f25aa794d9..0091df9dd73b 100644
--- a/Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
+++ b/Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
@@ -74,7 +74,7 @@ properties:
- description: Must be 400 MHz
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
A port node pointing to the input port of a HDMI/DP or MIPI display bridge.
diff --git a/Documentation/devicetree/bindings/display/ingenic,ipu.yaml b/Documentation/devicetree/bindings/display/ingenic,ipu.yaml
index 12064a8e7a92..e679f48a3886 100644
--- a/Documentation/devicetree/bindings/display/ingenic,ipu.yaml
+++ b/Documentation/devicetree/bindings/display/ingenic,ipu.yaml
@@ -31,9 +31,8 @@ properties:
clock-names:
const: ipu
-patternProperties:
- "^ports?$":
- description: OF graph bindings (specified in bindings/graph.txt).
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/ingenic,lcd.yaml b/Documentation/devicetree/bindings/display/ingenic,lcd.yaml
index 768050f30dba..50d2b0a50e8a 100644
--- a/Documentation/devicetree/bindings/display/ingenic,lcd.yaml
+++ b/Documentation/devicetree/bindings/display/ingenic,lcd.yaml
@@ -39,18 +39,18 @@ properties:
minItems: 1
port:
- description: OF graph bindings (specified in bindings/graph.txt).
+ $ref: /schemas/graph.yaml#/properties/port
ports:
- description: OF graph bindings (specified in bindings/graph.txt).
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
+
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: DPI output, to interface with TFT panels.
port@8:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Link to the Image Processing Unit (IPU).
(See ingenic,ipu.yaml).
diff --git a/Documentation/devicetree/bindings/display/intel,keembay-display.yaml b/Documentation/devicetree/bindings/display/intel,keembay-display.yaml
index 0a697d45c2ad..bc6622b010ca 100644
--- a/Documentation/devicetree/bindings/display/intel,keembay-display.yaml
+++ b/Documentation/devicetree/bindings/display/intel,keembay-display.yaml
@@ -36,7 +36,7 @@ properties:
maxItems: 1
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Display output node to DSI.
required:
diff --git a/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml b/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
index 6b7fddc80c41..67682fe77f10 100644
--- a/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
+++ b/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
@@ -37,34 +37,33 @@ properties:
panel-timing: true
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
+
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: The sink for odd pixels.
properties:
- reg:
- const: 0
-
dual-lvds-odd-pixels: true
required:
- - reg
- dual-lvds-odd-pixels
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: The sink for even pixels.
properties:
- reg:
- const: 1
-
dual-lvds-even-pixels: true
required:
- - reg
- dual-lvds-even-pixels
+ required:
+ - port@0
+ - port@1
+
additionalProperties: false
required:
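Both ports are now required and keep their dual-LVDS flags. A minimal sketch
of the panel's ``ports`` node (labels and remote-endpoint phandles are
placeholders; other required panel properties are omitted):

.. code-block:: none

   ports {
       #address-cells = <1>;
       #size-cells = <0>;

       port@0 {
           reg = <0>;
           dual-lvds-odd-pixels;

           panel_in_odd: endpoint {
               remote-endpoint = <&lvds0_out>;
           };
       };

       port@1 {
           reg = <1>;
           dual-lvds-even-pixels;

           panel_in_even: endpoint {
               remote-endpoint = <&lvds1_out>;
           };
       };
   };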
diff --git a/Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml b/Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml
index 51f423297ec8..9e78f2e60f99 100644
--- a/Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml
+++ b/Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml
@@ -20,6 +20,7 @@ properties:
compatible:
enum:
- mantix,mlaf057we51-x
+ - ys,ys57pss36bh5gq
port: true
reg:
diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.yaml b/Documentation/devicetree/bindings/display/panel/panel-common.yaml
index cd6dc5461721..5b38dc89cb21 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-common.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-common.yaml
@@ -68,16 +68,7 @@ properties:
# Connectivity
port:
- type: object
-
- ports:
- type: object
- description:
- Panels receive video data through one or multiple connections. While
- the nature of those connections is specific to the panel type, the
- connectivity is expressed in a standard fashion using ports as specified
- in the device graph bindings defined in
- Documentation/devicetree/bindings/graph.txt.
+ $ref: /schemas/graph.yaml#/properties/port
ddc-i2c-bus:
$ref: /schemas/types.yaml#/definitions/phandle
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
index 72e4b6d4d5e1..fbd71669248f 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
@@ -35,6 +35,8 @@ properties:
- boe,tv080wum-nl0
# Innolux P079ZCA 7.85" 768x1024 TFT LCD panel
- innolux,p079zca
+ # Khadas TS050 5" 1080x1920 LCD panel
+ - khadas,ts050
# Kingdisplay KD097D04 9.7" 1536x2048 TFT LCD panel
- kingdisplay,kd097d04
# LG ACX467AKM-7 4.95" 1080×1920 LCD Panel
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 27fffafe5b5c..35b42ee4ed1d 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -76,6 +76,8 @@ properties:
# BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
- boe,nv101wxmn51
# BOE NV133FHM-N61 13.3" FHD (1920x1080) TFT LCD Panel
+ - boe,nv110wtm-n61
+ # BOE NV110WTM-N61 11.0" 2160x1440 TFT LCD Panel
- boe,nv133fhm-n61
# BOE NV133FHM-N62 13.3" FHD (1920x1080) TFT LCD Panel
- boe,nv133fhm-n62
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
index 1dab80ae1d0a..ea58df49263a 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
@@ -11,6 +11,7 @@ maintainers:
allOf:
- $ref: panel-common.yaml#
+ - $ref: /schemas/leds/backlight/common.yaml#
properties:
compatible:
@@ -19,6 +20,8 @@ properties:
reg: true
reset-gpios: true
port: true
+ default-brightness: true
+ max-brightness: true
vdd3-supply:
description: VDD regulator
@@ -31,7 +34,6 @@ required:
- reset-gpios
- vdd3-supply
- vci-supply
- - port
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
index 4110d003ce1f..008c144257cb 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
@@ -43,34 +43,24 @@ properties:
This soc uses GRF regs to switch the HDMI TX input between vop0 and vop1.
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Port node with two endpoints, numbered 0 and 1,
connected respectively to vop0 and vop1.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Port node with one endpoint connected to a hdmi-connector node.
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
index ed8148e26e24..6f43d885c9b3 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
@@ -70,10 +70,7 @@ properties:
- const: dclk
port:
- type: object
- description:
- A port node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/port
assigned-clocks:
maxItems: 2
diff --git a/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml b/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
index 327a14d85df8..679daed4124e 100644
--- a/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
@@ -51,20 +51,16 @@ properties:
Phandle of the regulator that provides the supply voltage.
ports:
- type: object
- description:
- A node containing DSI input & output port nodes with endpoint
- definitions as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
- Documentation/devicetree/bindings/graph.txt
+ $ref: /schemas/graph.yaml#/properties/ports
+
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
DSI input port node, connected to the ltdc rgb output port.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
DSI output port node, connected to a panel or a bridge input port
diff --git a/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml b/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
index bf8ad916e9b0..d54f9ca207af 100644
--- a/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
+++ b/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
@@ -35,15 +35,13 @@ properties:
maxItems: 1
port:
- type: object
- description:
- "Video port for DPI RGB output.
+ $ref: /schemas/graph.yaml#/properties/port
+ description: |
+ Video port for DPI RGB output.
ltdc has one video port with up to 2 endpoints:
- for external dpi rgb panel or bridge, using gpios.
- for internal dpi input of the MIPI DSI host controller.
Note: These 2 endpoints cannot be activated simultaneously.
- Please refer to the bindings defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt."
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/ste,mcde.txt b/Documentation/devicetree/bindings/display/ste,mcde.txt
deleted file mode 100644
index 4c33c692bd5f..000000000000
--- a/Documentation/devicetree/bindings/display/ste,mcde.txt
+++ /dev/null
@@ -1,104 +0,0 @@
-ST-Ericsson Multi Channel Display Engine MCDE
-
-The ST-Ericsson MCDE is a display controller with support for compositing
-and displaying several channels memory resident graphics data on DSI or
-LCD displays or bridges. It is used in the ST-Ericsson U8500 platform.
-
-Required properties:
-
-- compatible: must be:
- "ste,mcde"
-- reg: register base for the main MCDE control registers, should be
- 0x1000 in size
-- interrupts: the interrupt line for the MCDE
-- epod-supply: a phandle to the EPOD regulator
-- vana-supply: a phandle to the analog voltage regulator
-- clocks: an array of the MCDE clocks in this strict order:
- MCDECLK (main MCDE clock), LCDCLK (LCD clock), PLLDSI
- (HDMI clock), DSI0ESCLK (DSI0 energy save clock),
- DSI1ESCLK (DSI1 energy save clock), DSI2ESCLK (DSI2 energy
- save clock)
-- clock-names: must be the following array:
- "mcde", "lcd", "hdmi"
- to match the required clock inputs above.
-- #address-cells: should be <1> (for the DSI hosts that will be children)
-- #size-cells: should be <1> (for the DSI hosts that will be children)
-- ranges: this should always be stated
-
-Required subnodes:
-
-The devicetree must specify subnodes for the DSI host adapters.
-These must have the following characteristics:
-
-- compatible: must be:
- "ste,mcde-dsi"
-- reg: must specify the register range for the DSI host
-- vana-supply: phandle to the VANA voltage regulator
-- clocks: phandles to the high speed and low power (energy save) clocks
- the high speed clock is not present on the third (dsi2) block, so it
- should only have the "lp" clock
-- clock-names: "hs" for the high speed clock and "lp" for the low power
- (energy save) clock
-- #address-cells: should be <1>
-- #size-cells: should be <0>
-
-Display panels and bridges will appear as children on the DSI hosts, and
-the displays are connected to the DSI hosts using the common binding
-for video transmitter interfaces; see
-Documentation/devicetree/bindings/media/video-interfaces.txt
-
-If a DSI host is unused (not connected) it will have no children defined.
-
-Example:
-
-mcde@a0350000 {
- compatible = "ste,mcde";
- reg = <0xa0350000 0x1000>;
- interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
- epod-supply = <&db8500_b2r2_mcde_reg>;
- vana-supply = <&ab8500_ldo_ana_reg>;
- clocks = <&prcmu_clk PRCMU_MCDECLK>, /* Main MCDE clock */
- <&prcmu_clk PRCMU_LCDCLK>, /* LCD clock */
- <&prcmu_clk PRCMU_PLLDSI>; /* HDMI clock */
- clock-names = "mcde", "lcd", "hdmi";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- dsi0: dsi@a0351000 {
- compatible = "ste,mcde-dsi";
- reg = <0xa0351000 0x1000>;
- vana-supply = <&ab8500_ldo_ana_reg>;
- clocks = <&prcmu_clk PRCMU_DSI0CLK>, <&prcmu_clk PRCMU_DSI0ESCCLK>;
- clock-names = "hs", "lp";
- #address-cells = <1>;
- #size-cells = <0>;
-
- panel {
- compatible = "samsung,s6d16d0";
- reg = <0>;
- vdd1-supply = <&ab8500_ldo_aux1_reg>;
- reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
- };
-
- };
- dsi1: dsi@a0352000 {
- compatible = "ste,mcde-dsi";
- reg = <0xa0352000 0x1000>;
- vana-supply = <&ab8500_ldo_ana_reg>;
- clocks = <&prcmu_clk PRCMU_DSI1CLK>, <&prcmu_clk PRCMU_DSI1ESCCLK>;
- clock-names = "hs", "lp";
- #address-cells = <1>;
- #size-cells = <0>;
- };
- dsi2: dsi@a0353000 {
- compatible = "ste,mcde-dsi";
- reg = <0xa0353000 0x1000>;
- vana-supply = <&ab8500_ldo_ana_reg>;
- /* This DSI port only has the Low Power / Energy Save clock */
- clocks = <&prcmu_clk PRCMU_DSI2ESCCLK>;
- clock-names = "lp";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-};
diff --git a/Documentation/devicetree/bindings/display/ste,mcde.yaml b/Documentation/devicetree/bindings/display/ste,mcde.yaml
new file mode 100644
index 000000000000..de0c678b3c29
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ste,mcde.yaml
@@ -0,0 +1,168 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/ste,mcde.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ST-Ericsson Multi Channel Display Engine MCDE
+
+maintainers:
+ - Linus Walleij <[email protected]>
+
+properties:
+ compatible:
+ const: ste,mcde
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ description: an array of the MCDE clocks
+ items:
+ - description: MCDECLK (main MCDE clock)
+ - description: LCDCLK (LCD clock)
+ - description: PLLDSI (HDMI clock)
+
+ clock-names:
+ items:
+ - const: mcde
+ - const: lcd
+ - const: hdmi
+
+ resets:
+ maxItems: 1
+
+ epod-supply:
+ description: a phandle to the EPOD regulator
+
+ vana-supply:
+ description: a phandle to the analog voltage regulator
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ A DPI port node
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ ranges: true
+
+patternProperties:
+ "^dsi@[0-9a-f]+$":
+ description: subnodes for the three DSI host adapters
+ type: object
+ allOf:
+ - $ref: dsi-controller.yaml#
+ properties:
+ compatible:
+ const: ste,mcde-dsi
+
+ reg:
+ maxItems: 1
+
+ vana-supply:
+ description: a phandle to the analog voltage regulator
+
+ clocks:
+ description: phandles to the high speed and low power (energy save) clocks.
+ The high speed clock is not present on the third (dsi2) block, so it
+ should only have the "lp" clock.
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ oneOf:
+ - items:
+ - const: hs
+ - const: lp
+ - items:
+ - const: lp
+
+ required:
+ - compatible
+ - reg
+ - vana-supply
+ - clocks
+ - clock-names
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - epod-supply
+ - vana-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/mfd/dbx500-prcmu.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ mcde@a0350000 {
+ compatible = "ste,mcde";
+ reg = <0xa0350000 0x1000>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ epod-supply = <&db8500_b2r2_mcde_reg>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
+ clocks = <&prcmu_clk PRCMU_MCDECLK>,
+ <&prcmu_clk PRCMU_LCDCLK>,
+ <&prcmu_clk PRCMU_PLLDSI>;
+ clock-names = "mcde", "lcd", "hdmi";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ dsi0: dsi@a0351000 {
+ compatible = "ste,mcde-dsi";
+ reg = <0xa0351000 0x1000>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
+ clocks = <&prcmu_clk PRCMU_DSI0CLK>, <&prcmu_clk PRCMU_DSI0ESCCLK>;
+ clock-names = "hs", "lp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "samsung,s6d16d0";
+ reg = <0>;
+ vdd1-supply = <&ab8500_ldo_aux1_reg>;
+ reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ dsi1: dsi@a0352000 {
+ compatible = "ste,mcde-dsi";
+ reg = <0xa0352000 0x1000>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
+ clocks = <&prcmu_clk PRCMU_DSI1CLK>, <&prcmu_clk PRCMU_DSI1ESCCLK>;
+ clock-names = "hs", "lp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ dsi2: dsi@a0353000 {
+ compatible = "ste,mcde-dsi";
+ reg = <0xa0353000 0x1000>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
+ /* This DSI port only has the Low Power / Energy Save clock */
+ clocks = <&prcmu_clk PRCMU_DSI2ESCCLK>;
+ clock-names = "lp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
index 4dc30738ee57..781c1868b0b8 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
@@ -74,30 +74,19 @@ properties:
type: boolean
ports:
- type: object
- description:
- Ports as described in Documentation/devicetree/bindings/graph.txt
- properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
The DSS OLDI output port node from video port 1
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
The DSS DPI output port node from video port 2
- required:
- - "#address-cells"
- - "#size-cells"
-
ti,am65x-oldi-io-ctrl:
$ref: "/schemas/types.yaml#/definitions/phandle-array"
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
index c9a947d55fa4..2986f9acc9f0 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
@@ -107,40 +107,29 @@ properties:
type: boolean
ports:
- type: object
- description:
- Ports as described in Documentation/devicetree/bindings/graph.txt
- properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
The output port node from video port 1
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
The output port node from video port 2
port@2:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
The output port node from video port 3
port@3:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
The output port node from video port 4
- required:
- - "#address-cells"
- - "#size-cells"
-
max-memory-bandwidth:
$ref: /schemas/types.yaml#/definitions/uint32
description:
diff --git a/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
index 8f87b82c6695..7ce7bbad5780 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
@@ -54,9 +54,8 @@ properties:
description: phandle to the associated power domain
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
- Port as described in Documentation/devicetree/bindings/graph.txt.
The DSS DPI output port node
max-memory-bandwidth:
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
index b15f68c499cb..df29d59d13a8 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <[email protected]>
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/ti/k3-bcdma.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments K3 DMSS BCDMA Device Tree Bindings
maintainers:
- - Peter Ujfalusi <[email protected]>
+ - Peter Ujfalusi <[email protected]>
description: |
The Block Copy DMA (BCDMA) is intended to perform similar functions as the TR
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
index b13ab60cd740..ea19d12a9337 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <[email protected]>
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/ti/k3-pktdma.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments K3 DMSS PKTDMA Device Tree Bindings
maintainers:
- - Peter Ujfalusi <[email protected]>
+ - Peter Ujfalusi <[email protected]>
description: |
The Packet DMA (PKTDMA) is intended to perform similar functions as the packet
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
index 9a87fd9041eb..6a09bbf83d46 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2019 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <[email protected]>
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/ti/k3-udma.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments K3 NAVSS Unified DMA Device Tree Bindings
maintainers:
- - Peter Ujfalusi <[email protected]>
+ - Peter Ujfalusi <[email protected]>
description: |
The UDMA-P is intended to perform similar (but significantly upgraded)
diff --git a/Documentation/devicetree/bindings/iio/accel/bosch,bma255.yaml b/Documentation/devicetree/bindings/iio/accel/bosch,bma255.yaml
index 6eef3480ea8f..c2efbb813ca2 100644
--- a/Documentation/devicetree/bindings/iio/accel/bosch,bma255.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/bosch,bma255.yaml
@@ -16,8 +16,8 @@ description:
properties:
compatible:
enum:
- - bosch,bmc150
- - bosch,bmi055
+ - bosch,bmc150_accel
+ - bosch,bmi055_accel
- bosch,bma255
- bosch,bma250e
- bosch,bma222
diff --git a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
index 244befb6402a..de9dd574a2f9 100644
--- a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
@@ -163,6 +163,7 @@ allOf:
enum:
- renesas,etheravb-r8a774a1
- renesas,etheravb-r8a774b1
+ - renesas,etheravb-r8a774e1
- renesas,etheravb-r8a7795
- renesas,etheravb-r8a7796
- renesas,etheravb-r8a77961
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index b2f6083f556a..dfbf5fe4547a 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -161,7 +161,8 @@ properties:
* snps,route-dcbcp, DCB Control Packets
* snps,route-up, Untagged Packets
* snps,route-multi-broad, Multicast & Broadcast Packets
- * snps,priority, RX queue priority (Range 0x0 to 0xF)
+ * snps,priority, bitmask of the tagged frames priorities assigned to
+ the queue
snps,mtl-tx-config:
$ref: /schemas/types.yaml#/definitions/phandle
@@ -188,7 +189,10 @@ properties:
* snps,idle_slope, unlock on WoL
* snps,high_credit, max write outstanding req. limit
* snps,low_credit, max read outstanding req. limit
- * snps,priority, TX queue priority (Range 0x0 to 0xF)
+ * snps,priority, bitmask of the priorities assigned to the queue.
+ When a PFC frame is received with priorities matching the bitmask,
+ the queue is blocked from transmitting for the pause time specified
+ in the PFC frame.
snps,reset-gpio:
deprecated: true
diff --git a/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml b/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
index a6c259ce9785..956156fe52a3 100644
--- a/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
@@ -19,7 +19,9 @@ description: |
properties:
compatible:
enum:
- - nxp,pf8x00
+ - nxp,pf8100
+ - nxp,pf8121a
+ - nxp,pf8200
reg:
maxItems: 1
@@ -118,7 +120,7 @@ examples:
#size-cells = <0>;
pmic@8 {
- compatible = "nxp,pf8x00";
+ compatible = "nxp,pf8100";
reg = <0x08>;
regulators {
diff --git a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
index b8f0b7809c02..7d462b899473 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
@@ -44,6 +44,7 @@ First Level Nodes - PMIC
Definition: Must be one of below:
"qcom,pm8005-rpmh-regulators"
"qcom,pm8009-rpmh-regulators"
+ "qcom,pm8009-1-rpmh-regulators"
"qcom,pm8150-rpmh-regulators"
"qcom,pm8150l-rpmh-regulators"
"qcom,pm8350-rpmh-regulators"
diff --git a/Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml b/Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml
index 805da4d6a88e..ec06789b21df 100644
--- a/Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <[email protected]>
%YAML 1.2
---
$id: http://devicetree.org/schemas/sound/ti,j721e-cpb-audio.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments J721e Common Processor Board Audio Support
maintainers:
- - Peter Ujfalusi <[email protected]>
+ - Peter Ujfalusi <[email protected]>
description: |
The audio support on the board is using pcm3168a codec connected to McASP10
diff --git a/Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml b/Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml
index bb780f621628..ee9f960de36b 100644
--- a/Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <[email protected]>
%YAML 1.2
---
$id: http://devicetree.org/schemas/sound/ti,j721e-cpb-ivi-audio.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments J721e Common Processor Board Audio Support
maintainers:
- - Peter Ujfalusi <[email protected]>
+ - Peter Ujfalusi <[email protected]>
description: |
The Infotainment board plugs into the Common Processor Board, the support of the
diff --git a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
index 388245b91a55..148b3fb4ceaf 100644
--- a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
@@ -11,8 +11,12 @@ maintainers:
properties:
compatible:
- items:
+ oneOf:
- const: ti,j721e-usb
+ - const: ti,am64-usb
+ - items:
+ - const: ti,j721e-usb
+ - const: ti,am64-usb
reg:
description: module registers
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 041ae90b0d8f..bec932e50f6d 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -1260,6 +1260,8 @@ patternProperties:
description: YSH & ATIL
"^yones-toptech,.*":
description: Yones Toptech Co., Ltd.
+ "^ys,.*":
+ description: Shenzhen Yashi Changhua Intelligent Technology Co., Ltd.
"^ysoft,.*":
description: Y Soft Corporation a.s.
"^zealz,.*":
diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst
index 2fb2ff297d69..36ac2166ad67 100644
--- a/Documentation/doc-guide/sphinx.rst
+++ b/Documentation/doc-guide/sphinx.rst
@@ -48,12 +48,12 @@ or ``virtualenv``, depending on how your distribution packaged Python 3.
those versions, you should run ``pip install 'docutils==0.12'``.
#) It is recommended to use the RTD theme for html output. Depending
- on the Sphinx version, it should be installed in separate,
+ on the Sphinx version, it should be installed separately,
with ``pip install sphinx_rtd_theme``.
- #) Some ReST pages contain math expressions. Due to the way Sphinx work,
+ #) Some ReST pages contain math expressions. Due to the way Sphinx works,
those expressions are written using LaTeX notation. It needs texlive
- installed with amdfonts and amsmath in order to evaluate them.
+ installed with amsfonts and amsmath in order to evaluate them.
In summary, if you want to install Sphinx version 1.7.9, you should do::
@@ -128,7 +128,7 @@ Sphinx Build
============
The usual way to generate the documentation is to run ``make htmldocs`` or
-``make pdfdocs``. There are also other formats available, see the documentation
+``make pdfdocs``. There are also other formats available: see the documentation
section of ``make help``. The generated documentation is placed in
format-specific subdirectories under ``Documentation/output``.
@@ -303,17 +303,17 @@ and *targets* (e.g. a ref to ``:ref:`last row <last row>``` / :ref:`last row
- head col 3
- head col 4
- * - column 1
+ * - row 1
- field 1.1
- field 1.2 with autospan
- * - column 2
+ * - row 2
- field 2.1
- :rspan:`1` :cspan:`1` field 2.2 - 3.3
* .. _`last row`:
- - column 3
+ - row 3
Rendered as:
@@ -325,17 +325,17 @@ Rendered as:
- head col 3
- head col 4
- * - column 1
+ * - row 1
- field 1.1
- field 1.2 with autospan
- * - column 2
+ * - row 2
- field 2.1
- :rspan:`1` :cspan:`1` field 2.2 - 3.3
* .. _`last row`:
- - column 3
+ - row 3
Cross-referencing
-----------------
@@ -361,7 +361,7 @@ Figures & Images
If you want to add an image, you should use the ``kernel-figure`` and
``kernel-image`` directives. E.g. to insert a figure with a scalable
-image format use SVG (:ref:`svg_image_example`)::
+image format, use SVG (:ref:`svg_image_example`)::
.. kernel-figure:: svg_image.svg
:alt: simple SVG image
@@ -375,7 +375,7 @@ image format use SVG (:ref:`svg_image_example`)::
SVG image example
-The kernel figure (and image) directive support **DOT** formatted files, see
+The kernel figure (and image) directive supports **DOT** formatted files, see
* DOT: http://graphviz.org/pdf/dotguide.pdf
* Graphviz: http://www.graphviz.org/content/dot-language
@@ -394,7 +394,7 @@ A simple example (:ref:`hello_dot_file`)::
DOT's hello world example
-Embed *render* markups (or languages) like Graphviz's **DOT** is provided by the
+Embedded *render* markups (or languages) like Graphviz's **DOT** are provided by the
``kernel-render`` directives.::
.. kernel-render:: DOT
@@ -406,7 +406,7 @@ Embed *render* markups (or languages) like Graphviz's **DOT** is provided by the
}
How this will be rendered depends on the installed tools. If Graphviz is
-installed, you will see an vector image. If not the raw markup is inserted as
+installed, you will see a vector image. If not, the raw markup is inserted as
*literal-block* (:ref:`hello_dot_render`).
.. _hello_dot_render:
@@ -421,8 +421,8 @@ installed, you will see an vector image. If not the raw markup is inserted as
The *render* directive has all the options known from the *figure* directive,
plus option ``caption``. If ``caption`` has a value, a *figure* node is
-inserted. If not, a *image* node is inserted. A ``caption`` is also needed, if
-you want to refer it (:ref:`hello_svg_render`).
+inserted. If not, an *image* node is inserted. A ``caption`` is also needed, if
+you want to refer to it (:ref:`hello_svg_render`).
Embedded **SVG**::
diff --git a/Documentation/firmware-guide/acpi/apei/einj.rst b/Documentation/firmware-guide/acpi/apei/einj.rst
index e588bccf5158..c042176e1707 100644
--- a/Documentation/firmware-guide/acpi/apei/einj.rst
+++ b/Documentation/firmware-guide/acpi/apei/einj.rst
@@ -50,8 +50,8 @@ The following files belong to it:
0x00000010 Memory Uncorrectable non-fatal
0x00000020 Memory Uncorrectable fatal
0x00000040 PCI Express Correctable
- 0x00000080 PCI Express Uncorrectable fatal
- 0x00000100 PCI Express Uncorrectable non-fatal
+ 0x00000080 PCI Express Uncorrectable non-fatal
+ 0x00000100 PCI Express Uncorrectable fatal
0x00000200 Platform Correctable
0x00000400 Platform Uncorrectable non-fatal
0x00000800 Platform Uncorrectable fatal
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 3c5ae4f6dfd2..87e5023e3f55 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -319,6 +319,15 @@ CRTC Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
:export:
+Color Management Functions Reference
+------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_color_mgmt.h
+ :internal:
+
Frame Buffer Abstraction
========================
@@ -370,6 +379,21 @@ Plane Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_plane.c
:export:
+Plane Composition Functions Reference
+-------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_blend.c
+ :export:
+
+Plane Damage Tracking Functions Reference
+-----------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_damage_helper.h
+ :internal:
+
Display Modes Function Reference
================================
@@ -436,6 +460,9 @@ KMS Locking
KMS Properties
==============
+This section of the documentation is primarily aimed at user-space developers.
+For the driver APIs, see the other sections.
+
Property Types and Blob Property Support
----------------------------------------
@@ -466,39 +493,30 @@ Standard CRTC Properties
.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
:doc: standard CRTC properties
+Standard Plane Properties
+-------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane.c
+ :doc: standard plane properties
+
Plane Composition Properties
----------------------------
.. kernel-doc:: drivers/gpu/drm/drm_blend.c
:doc: overview
-.. kernel-doc:: drivers/gpu/drm/drm_blend.c
- :export:
-
-FB_DAMAGE_CLIPS
-~~~~~~~~~~~~~~~
+Damage Tracking Properties
+--------------------------
.. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
:doc: overview
-.. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
- :export:
-
-.. kernel-doc:: include/drm/drm_damage_helper.h
- :internal:
-
Color Management Properties
---------------------------
.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
:doc: overview
-.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
- :export:
-
-.. kernel-doc:: include/drm/drm_color_mgmt.h
- :internal:
-
Tile Group Property
-------------------
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index 7dce175f6d75..04bdc7a91d53 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -457,5 +457,8 @@ Userspace API Structures
.. kernel-doc:: include/uapi/drm/drm_mode.h
:doc: overview
+.. kernel-doc:: include/uapi/drm/drm.h
+ :internal:
+
.. kernel-doc:: include/uapi/drm/drm_mode.h
:internal:
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 20868f3d0123..486c720f3890 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -428,7 +428,7 @@ User Batchbuffer Execution
Logical Rings, Logical Ring Contexts and Execlists
--------------------------------------------------
-.. kernel-doc:: drivers/gpu/drm/i915/gt/intel_lrc.c
+.. kernel-doc:: drivers/gpu/drm/i915/gt/intel_execlists_submission.c
:doc: Logical Rings, Logical Ring Contexts and Execlists
Global GTT views
diff --git a/Documentation/gpu/vkms.rst b/Documentation/gpu/vkms.rst
index 13bab1d93bb3..2c9b376da5ca 100644
--- a/Documentation/gpu/vkms.rst
+++ b/Documentation/gpu/vkms.rst
@@ -7,6 +7,88 @@
.. kernel-doc:: drivers/gpu/drm/vkms/vkms_drv.c
:doc: vkms (Virtual Kernel Modesetting)
+Setup
+=====
+
+The VKMS driver can be set up with the following steps:
+
+To check if VKMS is loaded, run::
+
+ lsmod | grep vkms
+
+This should list the VKMS driver. If no output is obtained, then
+you need to enable and/or load the VKMS driver.
+Ensure that the VKMS driver has been set as a loadable module in your
+kernel config file. Do::
+
+ make nconfig
+
+ Go to `Device Drivers> Graphics support`
+
+ Enable `Virtual KMS (EXPERIMENTAL)`
+
+Compile and build the kernel for the changes to take effect.
+Now, to load the driver, use::
+
+ sudo modprobe vkms
+
+Running the lsmod command now should list the VKMS driver.
+You can also observe the driver being loaded in the dmesg logs.
+
+The VKMS driver has optional features to simulate different kinds of hardware,
+which are exposed as module options. You can use the `modinfo` command
+to see the module options for vkms::
+
+ modinfo vkms
+
+Module options are helpful when testing, and they can be set
+while loading vkms. For example, to load vkms with cursor enabled,
+use::
+
+ sudo modprobe vkms enable_cursor=1
+
+To disable the driver, use ::
+
+ sudo modprobe -r vkms
+
+Testing With IGT
+================
+
+The IGT GPU Tools is a test suite used specifically for debugging and
+development of the DRM drivers.
+The IGT Tools can be installed from
+`here <https://gitlab.freedesktop.org/drm/igt-gpu-tools>`_ .
+
+The tests need to be run without a compositor, so you need to switch to text
+only mode. You can do this by::
+
+ sudo systemctl isolate multi-user.target
+
+To return to graphical mode, do::
+
+ sudo systemctl isolate graphical.target
+
+Once you are in text only mode, you can run tests using the --device switch
+or IGT_DEVICE variable to specify the device filter for the driver you want
+to test. IGT_DEVICE can also be used with the run-tests.sh script to run the
+tests for a specific driver::
+
+ sudo ./build/tests/<name of test> --device "sys:/sys/devices/platform/vkms"
+ sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/<name of test>
+ sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./scripts/run-tests.sh -t <name of test>
+
+For example, to test the functionality of the writeback library,
+we can run the kms_writeback test::
+
+ sudo ./build/tests/kms_writeback --device "sys:/sys/devices/platform/vkms"
+ sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/kms_writeback
+ sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./scripts/run-tests.sh -t kms_writeback
+
+You can also run subtests if you do not want to run the entire test::
+
+ sudo ./build/tests/kms_flip --run-subtest basic-plain-flip --device "sys:/sys/devices/platform/vkms"
+ sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/kms_flip --run-subtest basic-plain-flip
+
TODO
====
diff --git a/Documentation/hwmon/sbtsi_temp.rst b/Documentation/hwmon/sbtsi_temp.rst
index 922b3c8db666..749f518389c3 100644
--- a/Documentation/hwmon/sbtsi_temp.rst
+++ b/Documentation/hwmon/sbtsi_temp.rst
@@ -1,7 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0-or-later
Kernel driver sbtsi_temp
-==================
+========================
Supported hardware:
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index d36768cf1250..9f6a11881951 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -598,7 +598,7 @@ more details, with real examples.
explicitly added to $(targets).
Assignments to $(targets) are without $(obj)/ prefix. if_changed may be
- used in conjunction with custom rules as defined in "3.9 Custom Rules".
+ used in conjunction with custom rules as defined in "3.11 Custom Rules".
Note: It is a typical mistake to forget the FORCE prerequisite.
Another common pitfall is that whitespace is sometimes significant; for
diff --git a/Documentation/kernel-hacking/locking.rst b/Documentation/kernel-hacking/locking.rst
index 6ed806e6061b..c3448929a824 100644
--- a/Documentation/kernel-hacking/locking.rst
+++ b/Documentation/kernel-hacking/locking.rst
@@ -118,11 +118,11 @@ spinlock, but you may block holding a mutex. If you can't lock a mutex,
your task will suspend itself, and be woken up when the mutex is
released. This means the CPU can do something else while you are
waiting. There are many cases when you simply can't sleep (see
-`What Functions Are Safe To Call From Interrupts? <#sleeping-things>`__),
+`What Functions Are Safe To Call From Interrupts?`_),
and so have to use a spinlock instead.
Neither type of lock is recursive: see
-`Deadlock: Simple and Advanced <#deadlock>`__.
+`Deadlock: Simple and Advanced`_.
Locks and Uniprocessor Kernels
------------------------------
@@ -179,7 +179,7 @@ perfect world).
Note that you can also use spin_lock_irq() or
spin_lock_irqsave() here, which stop hardware interrupts
-as well: see `Hard IRQ Context <#hard-irq-context>`__.
+as well: see `Hard IRQ Context`_.
This works perfectly for UP as well: the spin lock vanishes, and this
macro simply becomes local_bh_disable()
@@ -230,7 +230,7 @@ The Same Softirq
~~~~~~~~~~~~~~~~
The same softirq can run on the other CPUs: you can use a per-CPU array
-(see `Per-CPU Data <#per-cpu-data>`__) for better performance. If you're
+(see `Per-CPU Data`_) for better performance. If you're
going so far as to use a softirq, you probably care about scalable
performance enough to justify the extra complexity.
diff --git a/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst b/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
index d3fcf536d14e..61e850460e18 100644
--- a/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
+++ b/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
@@ -164,46 +164,56 @@ Devlink health reporters
NPA Reporters
-------------
-The NPA reporters are responsible for reporting and recovering the following group of errors
+The NPA reporters are responsible for reporting and recovering the following group of errors:
+
1. GENERAL events
+
- Error due to operation of unmapped PF.
- Error due to disabled alloc/free for other HW blocks (NIX, SSO, TIM, DPI and AURA).
+
2. ERROR events
+
- Fault due to NPA_AQ_INST_S read or NPA_AQ_RES_S write.
- AQ Doorbell Error.
+
3. RAS events
+
- RAS Error Reporting for NPA_AQ_INST_S/NPA_AQ_RES_S.
+
4. RVU events
+
- Error due to unmapped slot.
-Sample Output
--------------
-~# devlink health
-pci/0002:01:00.0:
- reporter hw_npa_intr
- state healthy error 2872 recover 2872 last_dump_date 2020-12-10 last_dump_time 09:39:09 grace_period 0 auto_recover true auto_dump true
- reporter hw_npa_gen
- state healthy error 2872 recover 2872 last_dump_date 2020-12-11 last_dump_time 04:43:04 grace_period 0 auto_recover true auto_dump true
- reporter hw_npa_err
- state healthy error 2871 recover 2871 last_dump_date 2020-12-10 last_dump_time 09:39:17 grace_period 0 auto_recover true auto_dump true
- reporter hw_npa_ras
- state healthy error 0 recover 0 last_dump_date 2020-12-10 last_dump_time 09:32:40 grace_period 0 auto_recover true auto_dump true
+Sample Output::
+
+ ~# devlink health
+ pci/0002:01:00.0:
+ reporter hw_npa_intr
+ state healthy error 2872 recover 2872 last_dump_date 2020-12-10 last_dump_time 09:39:09 grace_period 0 auto_recover true auto_dump true
+ reporter hw_npa_gen
+ state healthy error 2872 recover 2872 last_dump_date 2020-12-11 last_dump_time 04:43:04 grace_period 0 auto_recover true auto_dump true
+ reporter hw_npa_err
+ state healthy error 2871 recover 2871 last_dump_date 2020-12-10 last_dump_time 09:39:17 grace_period 0 auto_recover true auto_dump true
+ reporter hw_npa_ras
+ state healthy error 0 recover 0 last_dump_date 2020-12-10 last_dump_time 09:32:40 grace_period 0 auto_recover true auto_dump true
Each reporter dumps the
+
- Error Type
- Error Register value
- Reason in words
-For eg:
-~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_gen
- NPA_AF_GENERAL:
- NPA General Interrupt Reg : 1
- NIX0: free disabled RX
-~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_intr
- NPA_AF_RVU:
- NPA RVU Interrupt Reg : 1
- Unmap Slot Error
-~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_err
- NPA_AF_ERR:
- NPA Error Interrupt Reg : 4096
- AQ Doorbell Error
+For example::
+
+ ~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_gen
+ NPA_AF_GENERAL:
+ NPA General Interrupt Reg : 1
+ NIX0: free disabled RX
+ ~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_intr
+ NPA_AF_RVU:
+ NPA RVU Interrupt Reg : 1
+ Unmap Slot Error
+ ~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_err
+ NPA_AF_ERR:
+ NPA Error Interrupt Reg : 4096
+ AQ Doorbell Error
diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst
index 4b9ed5874d5a..ae2ae37cd921 100644
--- a/Documentation/networking/netdev-FAQ.rst
+++ b/Documentation/networking/netdev-FAQ.rst
@@ -6,9 +6,9 @@
netdev FAQ
==========
-Q: What is netdev?
-------------------
-A: It is a mailing list for all network-related Linux stuff. This
+What is netdev?
+---------------
+It is a mailing list for all network-related Linux stuff. This
includes anything found under net/ (i.e. core code like IPv6) and
drivers/net (i.e. hardware specific drivers) in the Linux source tree.
@@ -25,9 +25,9 @@ Aside from subsystems like that mentioned above, all network-related
Linux development (i.e. RFC, review, comments, etc.) takes place on
netdev.
-Q: How do the changes posted to netdev make their way into Linux?
------------------------------------------------------------------
-A: There are always two trees (git repositories) in play. Both are
+How do the changes posted to netdev make their way into Linux?
+--------------------------------------------------------------
+There are always two trees (git repositories) in play. Both are
driven by David Miller, the main network maintainer. There is the
``net`` tree, and the ``net-next`` tree. As you can probably guess from
the names, the ``net`` tree is for fixes to existing code already in the
@@ -37,9 +37,9 @@ for the future release. You can find the trees here:
- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
-Q: How often do changes from these trees make it to the mainline Linus tree?
-----------------------------------------------------------------------------
-A: To understand this, you need to know a bit of background information on
+How often do changes from these trees make it to the mainline Linus tree?
+-------------------------------------------------------------------------
+To understand this, you need to know a bit of background information on
the cadence of Linux development. Each new release starts off with a
two week "merge window" where the main maintainers feed their new stuff
to Linus for merging into the mainline tree. After the two weeks, the
@@ -81,7 +81,8 @@ focus for ``net`` is on stabilization and bug fixes.
Finally, the vX.Y gets released, and the whole cycle starts over.
-Q: So where are we now in this cycle?
+So where are we now in this cycle?
+----------------------------------
Load the mainline (Linus) page here:
@@ -91,9 +92,9 @@ and note the top of the "tags" section. If it is rc1, it is early in
the dev cycle. If it was tagged rc7 a week ago, then a release is
probably imminent.
-Q: How do I indicate which tree (net vs. net-next) my patch should be in?
--------------------------------------------------------------------------
-A: Firstly, think whether you have a bug fix or new "next-like" content.
+How do I indicate which tree (net vs. net-next) my patch should be in?
+----------------------------------------------------------------------
+Firstly, think whether you have a bug fix or new "next-like" content.
Then once decided, assuming that you use git, use the prefix flag, i.e.
::
@@ -105,48 +106,45 @@ in the above is just the subject text of the outgoing e-mail, and you
can manually change it yourself with whatever MUA you are comfortable
with.
-Q: I sent a patch and I'm wondering what happened to it?
---------------------------------------------------------
-Q: How can I tell whether it got merged?
-A: Start by looking at the main patchworks queue for netdev:
+I sent a patch and I'm wondering what happened to it - how can I tell whether it got merged?
+--------------------------------------------------------------------------------------------
+Start by looking at the main patchworks queue for netdev:
https://patchwork.kernel.org/project/netdevbpf/list/
The "State" field will tell you exactly where things are at with your
patch.
-Q: The above only says "Under Review". How can I find out more?
-----------------------------------------------------------------
-A: Generally speaking, the patches get triaged quickly (in less than
+The above only says "Under Review". How can I find out more?
+-------------------------------------------------------------
+Generally speaking, the patches get triaged quickly (in less than
48h). So be patient. Asking the maintainer for status updates on your
patch is a good way to ensure your patch is ignored or pushed to the
bottom of the priority list.
-Q: I submitted multiple versions of the patch series
-----------------------------------------------------
-Q: should I directly update patchwork for the previous versions of these
-patch series?
-A: No, please don't interfere with the patch status on patchwork, leave
+I submitted multiple versions of the patch series. Should I directly update patchwork for the previous versions of these patch series?
+--------------------------------------------------------------------------------------------------------------------------------------
+No, please don't interfere with the patch status on patchwork, leave
it to the maintainer to figure out what is the most recent and current
version that should be applied. If there is any doubt, the maintainer
will reply and ask what should be done.
-Q: I made changes to only a few patches in a patch series should I resend only those changed?
----------------------------------------------------------------------------------------------
-A: No, please resend the entire patch series and make sure you do number your
+I made changes to only a few patches in a patch series. Should I resend only those changed?
+---------------------------------------------------------------------------------------------
+No, please resend the entire patch series and make sure you do number your
patches such that it is clear this is the latest and greatest set of patches
that can be applied.
-Q: I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
--------------------------------------------------------------------------------------------------------------------------------------------
-A: There is no revert possible, once it is pushed out, it stays like that.
+I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
+----------------------------------------------------------------------------------------------------------------------------------------
+There is no revert possible, once it is pushed out, it stays like that.
Please send incremental versions on top of what has been merged in order to fix
the patches the way they would look if your latest patch series was to be
merged.
-Q: How can I tell what patches are queued up for backporting to the various stable releases?
---------------------------------------------------------------------------------------------
-A: Normally Greg Kroah-Hartman collects stable commits himself, but for
+How can I tell what patches are queued up for backporting to the various stable releases?
+-----------------------------------------------------------------------------------------
+Normally Greg Kroah-Hartman collects stable commits himself, but for
networking, Dave collects up patches he deems critical for the
networking subsystem, and then hands them off to Greg.
@@ -169,11 +167,9 @@ simply clone the repo, and then git grep the mainline commit ID, e.g.
releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
stable/stable-queue$
-Q: I see a network patch and I think it should be backported to stable.
------------------------------------------------------------------------
-Q: Should I request it via [email protected] like the references in
-the kernel's Documentation/process/stable-kernel-rules.rst file say?
-A: No, not for networking. Check the stable queues as per above first
+I see a network patch and I think it should be backported to stable. Should I request it via [email protected] like the references in the kernel's Documentation/process/stable-kernel-rules.rst file say?
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+No, not for networking. Check the stable queues as per above first
to see if it is already queued. If not, then send a mail to netdev,
listing the upstream commit ID and why you think it should be a stable
candidate.
@@ -190,11 +186,9 @@ mainline, the better the odds that it is an OK candidate for stable. So
scrambling to request a commit be added the day after it appears should
be avoided.
-Q: I have created a network patch and I think it should be backported to stable.
---------------------------------------------------------------------------------
-Q: Should I add a Cc: [email protected] like the references in the
-kernel's Documentation/ directory say?
-A: No. See above answer. In short, if you think it really belongs in
+I have created a network patch and I think it should be backported to stable. Should I add a Cc: [email protected] like the references in the kernel's Documentation/ directory say?
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+No. See above answer. In short, if you think it really belongs in
stable, then ensure you write a decent commit log that describes who
gets impacted by the bug fix and how it manifests itself, and when the
bug was introduced. If you do that properly, then the commit will get
@@ -207,18 +201,18 @@ marker line as described in
:ref:`Documentation/process/submitting-patches.rst <the_canonical_patch_format>`
to temporarily embed that information into the patch that you send.
-Q: Are all networking bug fixes backported to all stable releases?
-------------------------------------------------------------------
-A: Due to capacity, Dave could only take care of the backports for the
+Are all networking bug fixes backported to all stable releases?
+---------------------------------------------------------------
+Due to capacity, Dave could only take care of the backports for the
last two stable releases. For earlier stable releases, each stable
branch maintainer is supposed to take care of them. If you find any
patch is missing from an earlier stable branch, please notify
[email protected] with either a commit ID or a formal patch
backported, and CC Dave and other relevant networking developers.
-Q: Is the comment style convention different for the networking content?
-------------------------------------------------------------------------
-A: Yes, in a largely trivial way. Instead of this::
+Is the comment style convention different for the networking content?
+---------------------------------------------------------------------
+Yes, in a largely trivial way. Instead of this::
/*
* foobar blah blah blah
@@ -231,32 +225,30 @@ it is requested that you make it look like this::
* another line of text
*/
-Q: I am working in existing code that has the former comment style and not the latter.
---------------------------------------------------------------------------------------
-Q: Should I submit new code in the former style or the latter?
-A: Make it the latter style, so that eventually all code in the domain
+I am working in existing code that has the former comment style and not the latter. Should I submit new code in the former style or the latter?
+-----------------------------------------------------------------------------------------------------------------------------------------------
+Make it the latter style, so that eventually all code in the domain
of netdev is of this format.
-Q: I found a bug that might have possible security implications or similar.
----------------------------------------------------------------------------
-Q: Should I mail the main netdev maintainer off-list?**
-A: No. The current netdev maintainer has consistently requested that
+I found a bug that might have possible security implications or similar. Should I mail the main netdev maintainer off-list?
+---------------------------------------------------------------------------------------------------------------------------
+No. The current netdev maintainer has consistently requested that
people use the mailing lists and not reach out directly. If you aren't
OK with that, then perhaps consider mailing [email protected] or
reading about http://oss-security.openwall.org/wiki/mailing-lists/distros
as possible alternative mechanisms.
-Q: What level of testing is expected before I submit my change?
----------------------------------------------------------------
-A: If your changes are against ``net-next``, the expectation is that you
+What level of testing is expected before I submit my change?
+------------------------------------------------------------
+If your changes are against ``net-next``, the expectation is that you
have tested by layering your changes on top of ``net-next``. Ideally
you will have done run-time testing specific to your change, but at a
minimum, your changes should survive an ``allyesconfig`` and an
``allmodconfig`` build without new warnings or failures.
-Q: How do I post corresponding changes to user space components?
-----------------------------------------------------------------
-A: User space code exercising kernel features should be posted
+How do I post corresponding changes to user space components?
+-------------------------------------------------------------
+User space code exercising kernel features should be posted
alongside kernel patches. This gives reviewers a chance to see
how any new interface is used and how well it works.
@@ -280,9 +272,9 @@ to the mailing list, e.g.::
Posting as one thread is discouraged because it confuses patchwork
(as of patchwork 2.2.2).
-Q: Any other tips to help ensure my net/net-next patch gets OK'd?
------------------------------------------------------------------
-A: Attention to detail. Re-read your own work as if you were the
+Any other tips to help ensure my net/net-next patch gets OK'd?
+--------------------------------------------------------------
+Attention to detail. Re-read your own work as if you were the
reviewer. You can start with using ``checkpatch.pl``, perhaps even with
the ``--strict`` flag. But do not be mindlessly robotic in doing so.
If your change is a bug fix, make sure your commit log indicates the
diff --git a/Documentation/networking/netdevices.rst b/Documentation/networking/netdevices.rst
index 5a85fcc80c76..17bdcb746dcf 100644
--- a/Documentation/networking/netdevices.rst
+++ b/Documentation/networking/netdevices.rst
@@ -10,18 +10,177 @@ Introduction
The following is a random collection of documentation regarding
network devices.
-struct net_device allocation rules
-==================================
+struct net_device lifetime rules
+================================
Network device structures need to persist even after module is unloaded and
must be allocated with alloc_netdev_mqs() and friends.
If device has registered successfully, it will be freed on last use
-by free_netdev(). This is required to handle the pathologic case cleanly
-(example: rmmod mydriver </sys/class/net/myeth/mtu )
+by free_netdev(). This is required to handle the pathological case cleanly
+(example: ``rmmod mydriver </sys/class/net/myeth/mtu``)
-alloc_netdev_mqs()/alloc_netdev() reserve extra space for driver
+alloc_netdev_mqs() / alloc_netdev() reserve extra space for driver
private data which gets freed when the network device is freed. If
separately allocated data is attached to the network device
-(netdev_priv(dev)) then it is up to the module exit handler to free that.
+(netdev_priv()) then it is up to the module exit handler to free that.
+
+There are two groups of APIs for registering struct net_device.
+The first group can be used in normal contexts where ``rtnl_lock`` is not
+already held: register_netdev(), unregister_netdev().
+The second group can be used when ``rtnl_lock`` is already held:
+register_netdevice(), unregister_netdevice(), free_netdev().
+
+Simple drivers
+--------------
+
+Most drivers (especially device drivers) handle the lifetime of struct net_device
+in contexts where ``rtnl_lock`` is not held (e.g. driver probe and remove paths).
+
+In that case the struct net_device registration is done using
+the register_netdev() and unregister_netdev() functions:
+
+.. code-block:: c
+
+ int probe()
+ {
+ struct my_device_priv *priv;
+ int err;
+
+ dev = alloc_netdev_mqs(...);
+ if (!dev)
+ return -ENOMEM;
+ priv = netdev_priv(dev);
+
+ /* ... do all device setup before calling register_netdev() ...
+ */
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_undo;
+
+ /* net_device is visible to the user! */
+
+ err_undo:
+ /* ... undo the device setup ... */
+ free_netdev(dev);
+ return err;
+ }
+
+ void remove()
+ {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+
+Note that after calling register_netdev() the device is visible in the system.
+Users can open it and start sending / receiving traffic immediately,
+or run any other callback, so all initialization must be done prior to
+registration.
+
+unregister_netdev() closes the device and waits for all users to be done
+with it. The memory of struct net_device itself may still be referenced
+by sysfs, but all operations on that device will fail.
+
+free_netdev() can be called after unregister_netdev() returns or when
+register_netdev() failed.
+
+Device management under RTNL
+----------------------------
+
+Registering struct net_device while in a context which already holds
+the ``rtnl_lock`` requires extra care. In those scenarios most drivers
+will want to make use of struct net_device's ``needs_free_netdev``
+and ``priv_destructor`` members for freeing state.
+
+Example flow of netdev handling under ``rtnl_lock``:
+
+.. code-block:: c
+
+ static void my_setup(struct net_device *dev)
+ {
+ dev->needs_free_netdev = true;
+ }
+
+  static void my_destructor(struct net_device *dev)
+  {
+    struct my_device_priv *priv = netdev_priv(dev);
+
+    some_obj_destroy(priv->obj);
+    some_uninit(priv);
+  }
+
+ int create_link()
+ {
+ struct my_device_priv *priv;
+ int err;
+
+ ASSERT_RTNL();
+
+ dev = alloc_netdev(sizeof(*priv), "net%d", NET_NAME_UNKNOWN, my_setup);
+ if (!dev)
+ return -ENOMEM;
+ priv = netdev_priv(dev);
+
+ /* Implicit constructor */
+ err = some_init(priv);
+ if (err)
+ goto err_free_dev;
+
+ priv->obj = some_obj_create();
+ if (!priv->obj) {
+ err = -ENOMEM;
+ goto err_some_uninit;
+ }
+ /* End of constructor, set the destructor: */
+ dev->priv_destructor = my_destructor;
+
+ err = register_netdevice(dev);
+ if (err)
+ /* register_netdevice() calls destructor on failure */
+ goto err_free_dev;
+
+ /* If anything fails now unregister_netdevice() (or unregister_netdev())
+ * will take care of calling my_destructor and free_netdev().
+ */
+
+ return 0;
+
+ err_some_uninit:
+ some_uninit(priv);
+ err_free_dev:
+ free_netdev(dev);
+ return err;
+ }
+
+If struct net_device.priv_destructor is set, it will be called by the core
+some time after unregister_netdevice(); it will also be called if
+register_netdevice() fails. The callback may be invoked with or without
+``rtnl_lock`` held.
+
+There is no explicit constructor callback; the driver "constructs" the
+private netdev state after allocating it and before registration.
+
+Setting struct net_device.needs_free_netdev makes the core call free_netdev()
+automatically after unregister_netdevice() when all references to the device
+are gone. It only takes effect after a successful call to register_netdevice(),
+so if register_netdevice() fails the driver is responsible for calling
+free_netdev().
+
+free_netdev() is safe to call on error paths right after
+unregister_netdevice() or when register_netdevice() fails. Parts of the
+netdev (de)registration process happen after ``rtnl_lock`` is released;
+therefore, in those cases free_netdev() will defer some of the processing
+until ``rtnl_lock`` is released.
+
+Devices spawned from struct rtnl_link_ops should never free the
+struct net_device directly.
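+
+As an illustration, a ``.dellink`` implementation consistent with this rule
+could look like the sketch below (the callback name is hypothetical;
+unregister_netdevice_queue() is the helper normally used for this):
+
+.. code-block:: c
+
+  static void my_dellink(struct net_device *dev, struct list_head *head)
+  {
+    /* Queue the netdev for unregistration; the core will later invoke
+     * priv_destructor and, with needs_free_netdev set, free the netdev.
+     * The driver must not call free_netdev() itself here.
+     */
+    unregister_netdevice_queue(dev, head);
+  }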
+
+.ndo_init and .ndo_uninit
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``.ndo_init`` and ``.ndo_uninit`` callbacks are called during net_device
+registration and de-registration, under ``rtnl_lock``. Drivers can use
+those e.g. when parts of their init process need to run under ``rtnl_lock``.
+
+``.ndo_init`` runs before the device is visible in the system;
+``.ndo_uninit`` runs during de-registration, after the device has been
+closed, but other subsystems may still hold outstanding references to the
+netdevice.
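+
+A minimal sketch of how these callbacks might be wired up follows; all
+``my_*`` names and the per-CPU stats field are hypothetical, only the
+callback signatures and kernel helpers are real:
+
+.. code-block:: c
+
+  static int my_ndo_init(struct net_device *dev)
+  {
+    struct my_device_priv *priv = netdev_priv(dev);
+
+    /* Runs under rtnl_lock, before the device is visible to users. */
+    priv->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+    if (!priv->stats)
+      return -ENOMEM;
+    return 0;
+  }
+
+  static void my_ndo_uninit(struct net_device *dev)
+  {
+    struct my_device_priv *priv = netdev_priv(dev);
+
+    /* Runs under rtnl_lock during de-registration; other subsystems
+     * may still hold references to the netdev at this point.
+     */
+    free_percpu(priv->stats);
+  }
+
+  static const struct net_device_ops my_netdev_ops = {
+    .ndo_init   = my_ndo_init,
+    .ndo_uninit = my_ndo_uninit,
+    /* ... other callbacks ... */
+  };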
MTU
===
@@ -64,8 +223,8 @@ ndo_do_ioctl:
Context: process
ndo_get_stats:
- Synchronization: dev_base_lock rwlock.
- Context: nominally process, but don't sleep inside an rwlock
+ Synchronization: rtnl_lock() semaphore, dev_base_lock rwlock, or RCU.
+ Context: atomic (can't sleep under rwlock or RCU)
ndo_start_xmit:
Synchronization: __netif_tx_lock spinlock.
diff --git a/Documentation/networking/packet_mmap.rst b/Documentation/networking/packet_mmap.rst
index 6c009ceb1183..500ef60b1b82 100644
--- a/Documentation/networking/packet_mmap.rst
+++ b/Documentation/networking/packet_mmap.rst
@@ -8,7 +8,7 @@ Abstract
========
This file documents the mmap() facility available with the PACKET
-socket interface on 2.4/2.6/3.x kernels. This type of sockets is used for
+socket interface. This type of socket is used for
i) capture network traffic with utilities like tcpdump,
ii) transmit network traffic, or any other that needs raw
@@ -25,12 +25,12 @@ Please send your comments to
Why use PACKET_MMAP
===================
-In Linux 2.4/2.6/3.x if PACKET_MMAP is not enabled, the capture process is very
+A non-PACKET_MMAP capture process (plain AF_PACKET) is very
inefficient. It uses very limited buffers and requires one system call to
capture each packet, it requires two if you want to get packet's timestamp
(like libpcap always does).
-In the other hand PACKET_MMAP is very efficient. PACKET_MMAP provides a size
+On the other hand PACKET_MMAP is very efficient. PACKET_MMAP provides a size
configurable circular buffer mapped in user space that can be used to either
send or receive packets. This way reading packets just needs to wait for them,
most of the time there is no need to issue a single system call. Concerning
@@ -252,8 +252,7 @@ PACKET_MMAP setting constraints
In kernel versions prior to 2.4.26 (for the 2.4 branch) and 2.6.5 (2.6 branch),
the PACKET_MMAP buffer could hold only 32768 frames in a 32 bit architecture or
-16384 in a 64 bit architecture. For information on these kernel versions
-see http://pusa.uv.es/~ulisses/packet_mmap/packet_mmap.pre-2.4.26_2.6.5.txt
+16384 in a 64 bit architecture.
Block size limit
----------------
@@ -437,7 +436,7 @@ and the following flags apply:
Capture process
^^^^^^^^^^^^^^^
- from include/linux/if_packet.h
+From include/linux/if_packet.h::
#define TP_STATUS_COPY (1 << 1)
#define TP_STATUS_LOSING (1 << 2)
diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst
index 0f55c6d540f9..5f0dea3d571e 100644
--- a/Documentation/networking/tls-offload.rst
+++ b/Documentation/networking/tls-offload.rst
@@ -530,7 +530,10 @@ TLS device feature flags only control adding of new TLS connection
offloads, old connections will remain active after flags are cleared.
TLS encryption cannot be offloaded to devices without checksum calculation
-offload. Hence, TLS TX device feature flag requires NETIF_F_HW_CSUM being set.
+offload. Hence, the TLS TX device feature flag requires TX csum offload to be set.
Disabling the latter implies clearing the former. Disabling TX checksum offload
should not affect old connections, and drivers should make sure checksum
calculation does not break for them.
+Similarly, device-offloaded TLS decryption implies doing RXCSUM. If the user
+does not want to enable RX csum offload, the TLS RX device feature is disabled
+as well.
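+
+As a sketch of how such a feature dependency can be expressed in a features
+fix-up hook (the function name is hypothetical; the NETIF_F_* flags and the
+``ndo_fix_features`` signature are real):
+
+.. code-block:: c
+
+  static netdev_features_t
+  my_fix_features(struct net_device *dev, netdev_features_t features)
+  {
+    /* TLS TX offload depends on TX checksum offload. */
+    if ((features & NETIF_F_HW_TLS_TX) && !(features & NETIF_F_HW_CSUM))
+      features &= ~NETIF_F_HW_TLS_TX;
+
+    /* TLS RX offload depends on RX checksum offload. */
+    if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM))
+      features &= ~NETIF_F_HW_TLS_RX;
+
+    return features;
+  }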
diff --git a/Documentation/process/4.Coding.rst b/Documentation/process/4.Coding.rst
index c27e59d2f702..0825dc496f22 100644
--- a/Documentation/process/4.Coding.rst
+++ b/Documentation/process/4.Coding.rst
@@ -249,10 +249,8 @@ features; most of these are found in the "kernel hacking" submenu. Several
of these options should be turned on for any kernel used for development or
testing purposes. In particular, you should turn on:
- - ENABLE_MUST_CHECK and FRAME_WARN to get an
- extra set of warnings for problems like the use of deprecated interfaces
- or ignoring an important return value from a function. The output
- generated by these warnings can be verbose, but one need not worry about
+ - FRAME_WARN to get warnings for stack frames larger than a given amount.
+ The output generated can be verbose, but one need not worry about
warnings from other parts of the kernel.
- DEBUG_OBJECTS will add code to track the lifetime of various objects
diff --git a/Documentation/sound/alsa-configuration.rst b/Documentation/sound/alsa-configuration.rst
index fe52c314b763..b36af65a08ed 100644
--- a/Documentation/sound/alsa-configuration.rst
+++ b/Documentation/sound/alsa-configuration.rst
@@ -1501,7 +1501,7 @@ Module for Digigram miXart8 sound cards.
This module supports multiple cards.
Note: One miXart8 board will be represented as 4 alsa cards.
-See MIXART.txt for details.
+See Documentation/sound/cards/mixart.rst for details.
When the driver is compiled as a module and the hotplug firmware
is supported, the firmware data is loaded via hotplug automatically.
diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
index 73bbd59afc33..e6365836fa8b 100644
--- a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
+++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
@@ -71,7 +71,7 @@ core/oss
The codes for PCM and mixer OSS emulation modules are stored in this
directory. The rawmidi OSS emulation is included in the ALSA rawmidi
code since it's quite small. The sequencer code is stored in
-``core/seq/oss`` directory (see `below <#core-seq-oss>`__).
+``core/seq/oss`` directory (see `below <core/seq/oss_>`__).
core/seq
~~~~~~~~
@@ -382,7 +382,7 @@ where ``enable[dev]`` is the module option.
Each time the ``probe`` callback is called, check the availability of
the device. If not available, simply increment the device index and
returns. dev will be incremented also later (`step 7
-<#set-the-pci-driver-data-and-return-zero>`__).
+<7) Set the PCI driver data and return zero._>`__).
2) Create a card instance
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -450,10 +450,10 @@ field contains the information shown in ``/proc/asound/cards``.
5) Create other components, such as mixer, MIDI, etc.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Here you define the basic components such as `PCM <#PCM-Interface>`__,
-mixer (e.g. `AC97 <#API-for-AC97-Codec>`__), MIDI (e.g.
-`MPU-401 <#MIDI-MPU401-UART-Interface>`__), and other interfaces.
-Also, if you want a `proc file <#Proc-Interface>`__, define it here,
+Here you define the basic components such as `PCM <PCM Interface_>`__,
+mixer (e.g. `AC97 <API for AC97 Codec_>`__), MIDI (e.g.
+`MPU-401 <MIDI (MPU401-UART) Interface_>`__), and other interfaces.
+Also, if you want a `proc file <Proc Interface_>`__, define it here,
too.
6) Register the card instance.
@@ -941,7 +941,7 @@ The allocation of an interrupt source is done like this:
chip->irq = pci->irq;
where :c:func:`snd_mychip_interrupt()` is the interrupt handler
-defined `later <#pcm-interface-interrupt-handler>`__. Note that
+defined `later <PCM Interrupt Handler_>`__. Note that
``chip->irq`` should be defined only when :c:func:`request_irq()`
succeeded.
@@ -3104,7 +3104,7 @@ processing the output stream in the irq handler.
If the MPU-401 interface shares its interrupt with the other logical
devices on the card, set ``MPU401_INFO_IRQ_HOOK`` (see
-`below <#MIDI-Interrupt-Handler>`__).
+`below <MIDI Interrupt Handler_>`__).
Usually, the port address corresponds to the command port and port + 1
corresponds to the data port. If not, you may change the ``cport``
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 70254eaa5229..c136e254b496 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -392,9 +392,14 @@ This ioctl is obsolete and has been removed.
Errors:
- ===== =============================
+ ======= ==============================================================
EINTR an unmasked signal is pending
- ===== =============================
+ ENOEXEC the vcpu hasn't been initialized or the guest tried to execute
+ instructions from device memory (arm64)
+ ENOSYS data abort outside memslots with no syndrome info and
+ KVM_CAP_ARM_NISV_TO_USER not enabled (arm64)
+ EPERM SVE feature set but not finalized (arm64)
+ ======= ==============================================================
This ioctl is used to run a guest virtual cpu. While there are no
explicit parameters, there is an implicit parameter block that can be
diff --git a/MAINTAINERS b/MAINTAINERS
index 546aa66428c9..d6dd31cb5ad1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -203,8 +203,8 @@ F: include/uapi/linux/nl80211.h
F: net/wireless/
8169 10/100/1000 GIGABIT ETHERNET DRIVER
-M: Realtek linux nic maintainers <[email protected]>
M: Heiner Kallweit <[email protected]>
S: Maintained
F: drivers/net/ethernet/realtek/r8169*
@@ -820,7 +820,6 @@ M: Netanel Belgazal <[email protected]>
M: Arthur Kiyanovski <[email protected]>
R: Guy Tzalik <[email protected]>
R: Saeed Bishara <[email protected]>
-R: Zorik Machulsky <[email protected]>
S: Supported
F: Documentation/networking/device_drivers/ethernet/amazon/ena.rst
@@ -907,7 +906,7 @@ AMD KFD
M: Felix Kuehling <[email protected]>
S: Supported
-T: git git://people.freedesktop.org/~agd5f/linux
+T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd*.[ch]
F: drivers/gpu/drm/amd/amdkfd/
F: drivers/gpu/drm/amd/include/cik_structs.h
@@ -2119,7 +2118,7 @@ N: atmel
ARM/Microchip Sparx5 SoC support
M: Lars Povlsen <[email protected]>
M: Steen Hegelund <[email protected]>
-M: Microchip Linux Driver Support <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Supported
T: git git://github.com/microchip-ung/linux-upstream.git
@@ -2942,7 +2941,6 @@ S: Maintained
F: drivers/hwmon/asus_atk0110.c
ATLX ETHERNET DRIVERS
-M: Jay Cliburn <[email protected]>
M: Chris Snook <[email protected]>
S: Maintained
@@ -3336,7 +3334,7 @@ F: arch/riscv/net/
X: arch/riscv/net/bpf_jit_comp64.c
BPF JIT for RISC-V (64-bit)
-M: Björn Töpel <[email protected]>
+M: Björn Töpel <[email protected]>
S: Maintained
@@ -3556,7 +3554,7 @@ S: Supported
F: drivers/net/ethernet/broadcom/bnxt/
BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
-M: Arend van Spriel <[email protected]>
+M: Arend van Spriel <[email protected]>
M: Franky Lin <[email protected]>
M: Hante Meuleman <[email protected]>
M: Chi-hsien Lin <[email protected]>
@@ -3881,9 +3879,9 @@ F: Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
F: drivers/mtd/nand/raw/cadence-nand-controller.c
CADENCE USB3 DRD IP DRIVER
-M: Peter Chen <[email protected]>
+M: Peter Chen <[email protected]>
M: Pawel Laszczak <[email protected]>
-M: Roger Quadros <[email protected]>
+R: Roger Quadros <[email protected]>
R: Aswath Govindraju <[email protected]>
S: Maintained
@@ -3961,7 +3959,7 @@ F: net/can/
CAN-J1939 NETWORK LAYER
M: Robin van der Gracht <[email protected]>
M: Oleksij Rempel <[email protected]>
-R: Pengutronix Kernel Team <[email protected]>
S: Maintained
F: Documentation/networking/j1939.rst
@@ -4163,7 +4161,7 @@ S: Maintained
F: Documentation/translations/zh_CN/
CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
-M: Peter Chen <[email protected]>
+M: Peter Chen <[email protected]>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
@@ -4313,7 +4311,9 @@ W: https://clangbuiltlinux.github.io/
B: https://github.com/ClangBuiltLinux/linux/issues
C: irc://chat.freenode.net/clangbuiltlinux
F: Documentation/kbuild/llvm.rst
+F: include/linux/compiler-clang.h
F: scripts/clang-tools/
+F: scripts/clang-version.sh
F: scripts/lld-version.sh
K: \b(?i:clang|llvm)\b
@@ -4588,7 +4588,7 @@ B: https://bugzilla.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
F: Documentation/admin-guide/pm/cpuidle.rst
F: Documentation/driver-api/pm/cpuidle.rst
-F: drivers/cpuidle/*
+F: drivers/cpuidle/
F: include/linux/cpuidle.h
CPU POWER MONITORING SUBSYSTEM
@@ -4922,9 +4922,8 @@ F: Documentation/scsi/dc395x.rst
F: drivers/scsi/dc395x.*
DCCP PROTOCOL
-M: Gerrit Renker <[email protected]>
-S: Maintained
+S: Orphan
W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
F: include/linux/dccp.h
F: include/linux/tfrc.h
@@ -5784,6 +5783,7 @@ F: drivers/gpu/drm/vboxvideo/
DRM DRIVER FOR VMWARE VIRTUAL GPU
M: "VMware Graphics" <[email protected]>
M: Roland Scheidegger <[email protected]>
+M: Zack Rusin <[email protected]>
S: Supported
T: git git://people.freedesktop.org/~sroland/linux
@@ -5985,8 +5985,8 @@ F: Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
F: drivers/gpu/drm/stm
DRM DRIVERS FOR TI KEYSTONE
-M: Jyri Sarha <[email protected]>
-M: Tomi Valkeinen <[email protected]>
+M: Jyri Sarha <[email protected]>
+M: Tomi Valkeinen <[email protected]>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -5996,15 +5996,15 @@ F: Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
F: drivers/gpu/drm/tidss/
DRM DRIVERS FOR TI LCDC
-M: Jyri Sarha <[email protected]>
-R: Tomi Valkeinen <[email protected]>
+M: Jyri Sarha <[email protected]>
+R: Tomi Valkeinen <[email protected]>
S: Maintained
F: Documentation/devicetree/bindings/display/tilcdc/
F: drivers/gpu/drm/tilcdc/
DRM DRIVERS FOR TI OMAP
-M: Tomi Valkeinen <[email protected]>
+M: Tomi Valkeinen <[email protected]>
S: Maintained
F: Documentation/devicetree/bindings/display/ti/
@@ -7363,7 +7363,6 @@ L: [email protected]
S: Maintained
F: Documentation/kbuild/gcc-plugins.rst
F: scripts/Makefile.gcc-plugins
-F: scripts/gcc-plugin.sh
F: scripts/gcc-plugins/
GCOV BASED KERNEL PROFILING
@@ -9240,7 +9239,7 @@ F: tools/testing/selftests/sgx/*
K: \bSGX_
INTERCONNECT API
-M: Georgi Djakov <[email protected]>
+M: Georgi Djakov <[email protected]>
S: Maintained
F: Documentation/devicetree/bindings/interconnect/
@@ -9273,7 +9272,7 @@ F: drivers/net/ethernet/sgi/ioc3-eth.c
IOMAP FILESYSTEM LIBRARY
M: Christoph Hellwig <[email protected]>
-M: Darrick J. Wong <[email protected]>
+M: Darrick J. Wong <[email protected]>
@@ -9327,7 +9326,6 @@ W: http://www.adaptec.com/
F: drivers/scsi/ips*
IPVS
-M: Wensong Zhang <[email protected]>
M: Simon Horman <[email protected]>
M: Julian Anastasov <[email protected]>
@@ -9776,7 +9774,7 @@ F: tools/testing/selftests/kvm/s390x/
KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
M: Paolo Bonzini <[email protected]>
-R: Sean Christopherson <[email protected]>
+R: Sean Christopherson <[email protected]>
R: Vitaly Kuznetsov <[email protected]>
R: Wanpeng Li <[email protected]>
R: Jim Mattson <[email protected]>
@@ -10260,7 +10258,6 @@ S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
F: Documentation/atomic_bitops.txt
F: Documentation/atomic_t.txt
-F: Documentation/core-api/atomic_ops.rst
F: Documentation/core-api/refcount-vs-atomic.rst
F: Documentation/litmus-tests/
F: Documentation/memory-barriers.txt
@@ -10847,7 +10844,7 @@ F: drivers/media/radio/radio-maxiradio*
MCAN MMIO DEVICE DRIVER
M: Dan Murphy <[email protected]>
-M: Sriram Dash <[email protected]>
+M: Pankaj Sharma <[email protected]>
S: Maintained
F: Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
@@ -11667,7 +11664,7 @@ F: drivers/media/platform/atmel/atmel-isi.h
MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
M: Woojung Huh <[email protected]>
-M: Microchip Linux Driver Support <[email protected]>
S: Maintained
F: Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
@@ -11677,7 +11674,7 @@ F: net/dsa/tag_ksz.c
MICROCHIP LAN743X ETHERNET DRIVER
M: Bryan Whitehead <[email protected]>
-M: Microchip Linux Driver Support <[email protected]>
S: Maintained
F: drivers/net/ethernet/microchip/lan743x_*
@@ -11771,7 +11768,7 @@ F: drivers/net/wireless/microchip/wilc1000/
MICROSEMI MIPS SOCS
M: Alexandre Belloni <[email protected]>
-M: Microchip Linux Driver Support <[email protected]>
S: Supported
F: Documentation/devicetree/bindings/mips/mscc.txt
@@ -12418,7 +12415,6 @@ F: tools/testing/selftests/net/ipsec.c
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <[email protected]>
-M: Alexey Kuznetsov <[email protected]>
M: Hideaki YOSHIFUJI <[email protected]>
S: Maintained
@@ -12475,7 +12471,6 @@ F: net/ipv6/tcp*.c
NETWORKING [TLS]
M: Boris Pismenny <[email protected]>
-M: Aviad Yehezkel <[email protected]>
M: John Fastabend <[email protected]>
M: Daniel Borkmann <[email protected]>
M: Jakub Kicinski <[email protected]>
@@ -12825,10 +12820,10 @@ F: tools/objtool/
F: include/linux/objtool.h
OCELOT ETHERNET SWITCH DRIVER
-M: Microchip Linux Driver Support <[email protected]>
M: Vladimir Oltean <[email protected]>
M: Claudiu Manoil <[email protected]>
M: Alexandre Belloni <[email protected]>
S: Supported
F: drivers/net/dsa/ocelot/*
@@ -12850,7 +12845,7 @@ F: include/misc/ocxl*
F: include/uapi/misc/ocxl.h
OMAP AUDIO SUPPORT
-M: Peter Ujfalusi <[email protected]>
+M: Peter Ujfalusi <[email protected]>
M: Jarkko Nikula <[email protected]>
L: [email protected] (moderated for non-subscribers)
@@ -13890,7 +13885,7 @@ F: drivers/platform/x86/peaq-wmi.c
PENSANDO ETHERNET DRIVERS
M: Shannon Nelson <[email protected]>
-M: Pensando Drivers <[email protected]>
S: Supported
F: Documentation/networking/device_drivers/ethernet/pensando/ionic.rst
@@ -14512,10 +14507,18 @@ S: Supported
F: drivers/crypto/qat/
QCOM AUDIO (ASoC) DRIVERS
-M: Patrick Lai <[email protected]>
+M: Srinivas Kandagatla <[email protected]>
M: Banajit Goswami <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Supported
+F: sound/soc/codecs/lpass-va-macro.c
+F: sound/soc/codecs/lpass-wsa-macro.*
+F: sound/soc/codecs/msm8916-wcd-analog.c
+F: sound/soc/codecs/msm8916-wcd-digital.c
+F: sound/soc/codecs/wcd9335.*
+F: sound/soc/codecs/wcd934x.c
+F: sound/soc/codecs/wcd-clsh-v2.*
+F: sound/soc/codecs/wsa881x.c
F: sound/soc/qcom/
QCOM IPA DRIVER
@@ -14669,7 +14672,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath11k/
QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
-M: QCA ath9k Development <[email protected]>
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath9k
@@ -14820,7 +14823,7 @@ M: Alex Deucher <[email protected]>
M: Christian König <[email protected]>
S: Supported
-T: git git://people.freedesktop.org/~agd5f/linux
+T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/
F: drivers/gpu/drm/radeon/
F: include/uapi/drm/amdgpu_drm.h
@@ -16321,6 +16324,7 @@ M: Pekka Enberg <[email protected]>
M: David Rientjes <[email protected]>
M: Joonsoo Kim <[email protected]>
M: Andrew Morton <[email protected]>
+M: Vlastimil Babka <[email protected]>
S: Maintained
F: include/linux/sl?b*.h
@@ -16710,6 +16714,8 @@ M: Samuel Thibault <[email protected]>
S: Odd Fixes
W: http://www.linux-speakup.org/
+W: https://github.com/linux-speakup/speakup
+B: https://github.com/linux-speakup/speakup/issues
F: drivers/accessibility/speakup/
SPEAR CLOCK FRAMEWORK SUPPORT
@@ -16964,7 +16970,7 @@ M: Olivier Moysan <[email protected]>
M: Arnaud Pouliquen <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Maintained
-F: Documentation/devicetree/bindings/sound/st,stm32-*.txt
+F: Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
F: sound/soc/stm/
STM32 TIMER/LPTIMER DRIVERS
@@ -17541,7 +17547,7 @@ F: arch/xtensa/
F: drivers/irqchip/irq-xtensa-*
TEXAS INSTRUMENTS ASoC DRIVERS
-M: Peter Ujfalusi <[email protected]>
+M: Peter Ujfalusi <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Maintained
F: sound/soc/ti/
@@ -17553,6 +17559,19 @@ S: Supported
F: Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt
F: drivers/iio/dac/ti-dac7612.c
+TEXAS INSTRUMENTS DMA DRIVERS
+M: Peter Ujfalusi <[email protected]>
+S: Maintained
+F: Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
+F: Documentation/devicetree/bindings/dma/ti-edma.txt
+F: Documentation/devicetree/bindings/dma/ti/
+F: drivers/dma/ti/
+X: drivers/dma/ti/cppi41.c
+F: include/linux/dma/k3-udma-glue.h
+F: include/linux/dma/ti-cppi5.h
+F: include/linux/dma/k3-psil.h
+
TEXAS INSTRUMENTS' SYSTEM CONTROL INTERFACE (TISCI) PROTOCOL DRIVER
M: Nishanth Menon <[email protected]>
M: Tero Kristo <[email protected]>
@@ -17838,7 +17857,7 @@ F: Documentation/devicetree/bindings/net/nfc/trf7970a.txt
F: drivers/nfc/trf7970a.c
TI TWL4030 SERIES SOC CODEC DRIVER
-M: Peter Ujfalusi <[email protected]>
+M: Peter Ujfalusi <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Maintained
F: sound/soc/codecs/twl4030*
@@ -18370,7 +18389,7 @@ F: include/linux/usb/isp116x.h
USB LAN78XX ETHERNET DRIVER
M: Woojung Huh <[email protected]>
-M: Microchip Linux Driver Support <[email protected]>
S: Maintained
F: Documentation/devicetree/bindings/net/microchip,lan78xx.txt
@@ -18404,7 +18423,7 @@ F: Documentation/usb/ohci.rst
F: drivers/usb/host/ohci*
USB OTG FSM (Finite State Machine)
-M: Peter Chen <[email protected]>
+M: Peter Chen <[email protected]>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
@@ -18484,7 +18503,7 @@ F: drivers/net/usb/smsc75xx.*
USB SMSC95XX ETHERNET DRIVER
M: Steve Glendinning <[email protected]>
-M: Microchip Linux Driver Support <[email protected]>
S: Maintained
F: drivers/net/usb/smsc95xx.*
@@ -19031,7 +19050,7 @@ F: drivers/input/mouse/vmmouse.h
VMWARE VMXNET3 ETHERNET DRIVER
M: Ronak Doshi <[email protected]>
-M: "VMware, Inc." <[email protected]>
S: Maintained
F: drivers/net/vmxnet3/
@@ -19058,7 +19077,6 @@ K: regulator_get_optional
VRF
M: David Ahern <[email protected]>
-M: Shrijeet Mukherjee <[email protected]>
S: Maintained
F: Documentation/networking/vrf.rst
@@ -19409,7 +19427,7 @@ F: drivers/net/ethernet/*/*/*xdp*
K: (?:\b|_)xdp(?:\b|_)
XDP SOCKETS (AF_XDP)
-M: Björn Töpel <[email protected]>
+M: Björn Töpel <[email protected]>
M: Magnus Karlsson <[email protected]>
R: Jonathan Lemon <[email protected]>
@@ -19505,7 +19523,7 @@ F: arch/x86/xen/*swiotlb*
F: drivers/xen/*swiotlb*
XFS FILESYSTEM
-M: Darrick J. Wong <[email protected]>
+M: Darrick J. Wong <[email protected]>
S: Supported
diff --git a/Makefile b/Makefile
index 3d328b7ab200..e0af7a4a5598 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc5
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 78c6f05b10f9..24862d15f3a3 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1105,6 +1105,12 @@ config HAVE_ARCH_PFN_VALID
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
bool
+config ARCH_SPLIT_ARG64
+ bool
+ help
+ If a 32-bit architecture requires 64-bit arguments to be split into
+ pairs of 32-bit arguments, select this option.
+
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/alpha/include/asm/local64.h b/arch/alpha/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/alpha/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 0c6bf0d1df7a..578bdbbb0fa7 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -102,16 +102,22 @@ libs-y += arch/arc/lib/ $(LIBGCC)
boot := arch/arc/boot
-#default target for make without any arguments.
-KBUILD_IMAGE := $(boot)/bootpImage
-
-all: bootpImage
-bootpImage: vmlinux
-
-boot_targets += uImage uImage.bin uImage.gz
+boot_targets := uImage.bin uImage.gz uImage.lzma
+PHONY += $(boot_targets)
$(boot_targets): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+uimage-default-y := uImage.bin
+uimage-default-$(CONFIG_KERNEL_GZIP) := uImage.gz
+uimage-default-$(CONFIG_KERNEL_LZMA) := uImage.lzma
+
+PHONY += uImage
+uImage: $(uimage-default-y)
+ @ln -sf $< $(boot)/uImage
+ @$(kecho) ' Image $(boot)/uImage is ready'
+
+CLEAN_FILES += $(boot)/uImage
+
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
index 538b92f4dd25..5648748c285f 100644
--- a/arch/arc/boot/Makefile
+++ b/arch/arc/boot/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-targets := vmlinux.bin vmlinux.bin.gz uImage
# uImage build relies on mkimage being availble on your host for ARC target
# You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
@@ -7,23 +6,18 @@ targets := vmlinux.bin vmlinux.bin.gz uImage
OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
-LINUX_START_TEXT = $$(readelf -h vmlinux | \
+LINUX_START_TEXT = $$($(READELF) -h vmlinux | \
grep "Entry point address" | grep -o 0x.*)
UIMAGE_LOADADDR = $(CONFIG_LINUX_LINK_BASE)
UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
-suffix-y := bin
-suffix-$(CONFIG_KERNEL_GZIP) := gz
-suffix-$(CONFIG_KERNEL_LZMA) := lzma
-
-targets += uImage
+targets += vmlinux.bin
+targets += vmlinux.bin.gz
+targets += vmlinux.bin.lzma
targets += uImage.bin
targets += uImage.gz
targets += uImage.lzma
-extra-y += vmlinux.bin
-extra-y += vmlinux.bin.gz
-extra-y += vmlinux.bin.lzma
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
@@ -42,7 +36,3 @@ $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
$(call if_changed,uimage,lzma)
-
-$(obj)/uImage: $(obj)/uImage.$(suffix-y)
- @ln -sf $(notdir $<) $@
- @echo ' Image $@ is ready'
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 81f4edec0c2a..3c1afa524b9c 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += extable.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += user.h
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 23e41e890eda..ad9b7fe4dba3 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -10,6 +10,7 @@
#ifndef __ASSEMBLY__
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
struct vm_area_struct;
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 1f5308abf36d..1743506081da 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -307,7 +307,7 @@ resume_user_mode_begin:
mov r0, sp ; pt_regs for arg to do_signal()/do_notify_resume()
GET_CURR_THR_INFO_FLAGS r9
- and.f 0, r9, TIF_SIGPENDING|TIF_NOTIFY_SIGNAL
+ and.f 0, r9, _TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL
bz .Lchk_notify_resume
; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index 6b5c54576f54..a2d10c29fbcc 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -7,6 +7,7 @@ menuconfig ARC_SOC_HSDK
depends on ISA_ARCV2
select ARC_HAS_ACCL_REGS
select ARC_IRQ_NO_AUTOSAVE
+ select ARC_FPU_SAVE_RESTORE
select CLK_HSDK
select RESET_CONTROLLER
select RESET_HSDK
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 11d41e86f814..7dde9fbb06d3 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -494,3 +494,11 @@
clock-names = "sysclk";
};
};
+
+&aes1_target {
+ status = "disabled";
+};
+
+&aes2_target {
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 5f1a8bd13880..e025b7c9a357 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -518,6 +518,9 @@
clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
<&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
clock-names = "fck", "sys_clk";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
};
};
@@ -550,6 +553,9 @@
clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
<&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
clock-names = "fck", "sys_clk";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
};
};
diff --git a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi
index c4c6c7e9e37b..5898879a3038 100644
--- a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi
+++ b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi
@@ -45,18 +45,21 @@
emac: gem@30000 {
compatible = "cadence,gem";
reg = <0x30000 0x10000>;
+ interrupt-parent = <&vic0>;
interrupts = <31>;
};
dmac1: dmac@40000 {
compatible = "snps,dw-dmac";
reg = <0x40000 0x10000>;
+ interrupt-parent = <&vic0>;
interrupts = <25>;
};
dmac2: dmac@50000 {
compatible = "snps,dw-dmac";
reg = <0x50000 0x10000>;
+ interrupt-parent = <&vic0>;
interrupts = <26>;
};
@@ -233,6 +236,7 @@
axi2pico@c0000000 {
compatible = "picochip,axi2pico-pc3x2";
reg = <0xc0000000 0x10000>;
+ interrupt-parent = <&vic0>;
interrupts = <13 14 15 16 17 18 19 20 21>;
};
};
diff --git a/arch/arm/boot/dts/ste-ux500-samsung-golden.dts b/arch/arm/boot/dts/ste-ux500-samsung-golden.dts
index 496f9d3ba7b7..60fe6189e728 100644
--- a/arch/arm/boot/dts/ste-ux500-samsung-golden.dts
+++ b/arch/arm/boot/dts/ste-ux500-samsung-golden.dts
@@ -329,6 +329,7 @@
panel@0 {
compatible = "samsung,s6e63m0";
reg = <0>;
+ max-brightness = <15>;
vdd3-supply = <&panel_reg_3v0>;
vci-supply = <&panel_reg_1v8>;
reset-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 1c11d1557779..20eb381e9761 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -279,6 +279,7 @@ CONFIG_SERIAL_OMAP_CONSOLE=y
CONFIG_SERIAL_DEV_BUS=y
CONFIG_I2C_CHARDEV=y
CONFIG_SPI=y
+CONFIG_SPI_GPIO=m
CONFIG_SPI_OMAP24XX=y
CONFIG_SPI_TI_QSPI=m
CONFIG_HSI=m
@@ -296,7 +297,6 @@ CONFIG_GPIO_TWL4030=y
CONFIG_W1=m
CONFIG_HDQ_MASTER_OMAP=m
CONFIG_W1_SLAVE_DS250X=m
-CONFIG_POWER_AVS=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_GPIO=y
CONFIG_BATTERY_BQ27XXX=m
@@ -369,8 +369,8 @@ CONFIG_DRM_OMAP=m
CONFIG_OMAP5_DSS_HDMI=y
CONFIG_OMAP2_DSS_SDI=y
CONFIG_OMAP2_DSS_DSI=y
-CONFIG_DRM_OMAP_PANEL_DSI_CM=m
CONFIG_DRM_TILCDC=m
+CONFIG_DRM_PANEL_DSI_CM=m
CONFIG_DRM_PANEL_SIMPLE=m
CONFIG_DRM_PANEL_LG_LB035Q02=m
CONFIG_DRM_PANEL_NEC_NL8048HL11=m
diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c
index 7b5cf8430c6d..cdde8fd01f8f 100644
--- a/arch/arm/crypto/chacha-glue.c
+++ b/arch/arm/crypto/chacha-glue.c
@@ -60,6 +60,7 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
chacha_block_xor_neon(state, d, s, nrounds);
if (d != dst)
memcpy(dst, buf, bytes);
+ state[12]++;
}
}
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 4a0848aef207..03657ff8fbe3 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -2,7 +2,6 @@
generic-y += early_ioremap.h
generic-y += extable.h
generic-y += flat.h
-generic-y += local64.h
generic-y += parport.h
generated-y += mach-types.h
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index f3191704cab9..56d6814bec26 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -230,10 +230,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
break;
case BUS_NOTIFY_BIND_DRIVER:
od = to_omap_device(pdev);
- if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
- pm_runtime_status_suspended(dev)) {
+ if (od) {
od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
- pm_runtime_set_active(dev);
+ if (od->_state == OMAP_DEVICE_STATE_ENABLED &&
+ pm_runtime_status_suspended(dev)) {
+ pm_runtime_set_active(dev);
+ }
}
break;
case BUS_NOTIFY_ADD_DEVICE:
diff --git a/arch/arm/mach-omap2/pmic-cpcap.c b/arch/arm/mach-omap2/pmic-cpcap.c
index eab281a5fc9f..09076ad0576d 100644
--- a/arch/arm/mach-omap2/pmic-cpcap.c
+++ b/arch/arm/mach-omap2/pmic-cpcap.c
@@ -71,7 +71,7 @@ static struct omap_voltdm_pmic omap_cpcap_iva = {
.vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
.vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
.vddmin = 900000,
- .vddmax = 1350000,
+ .vddmax = 1375000,
.vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
.i2c_slave_addr = 0x44,
.volt_reg_addr = 0x0,
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 60e901cd0de6..5a957a9a0984 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -371,7 +371,7 @@ static int __init xen_guest_init(void)
}
gnttab_init();
if (!xen_initial_domain())
- xenbus_probe(NULL);
+ xenbus_probe();
/*
* Making sure board specific code will not set up ops for
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 05e17351e4f3..f39568b28ec1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -174,8 +174,6 @@ config ARM64
select HAVE_NMI
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
- select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI && HW_PERF_EVENTS
- select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 6be9b3750250..90309208bb28 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -10,7 +10,7 @@
#
# Copyright (C) 1995-2001 by Russell King
-LDFLAGS_vmlinux :=--no-undefined -X -z norelro
+LDFLAGS_vmlinux :=--no-undefined -X
ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
@@ -115,16 +115,20 @@ KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
# Prefer the baremetal ELF build target, but not all toolchains include
# it so fall back to the standard linux version if needed.
-KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
+KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb -z norelro)
UTS_MACHINE := aarch64_be
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__AARCH64EL__
# Same as above, prefer ELF but fall back to linux target if needed.
-KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux)
+KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux -z norelro)
UTS_MACHINE := aarch64
endif
+ifeq ($(CONFIG_LD_IS_LLD), y)
+KBUILD_LDFLAGS += -z norelro
+endif
+
CHECKFLAGS += -D__aarch64__
ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)
diff --git a/arch/arm64/boot/dts/bitmain/bm1880.dtsi b/arch/arm64/boot/dts/bitmain/bm1880.dtsi
index fa6e6905f588..53a9b76057aa 100644
--- a/arch/arm64/boot/dts/bitmain/bm1880.dtsi
+++ b/arch/arm64/boot/dts/bitmain/bm1880.dtsi
@@ -127,7 +127,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -145,7 +145,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -163,7 +163,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <8>;
+ ngpios = <8>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index ff9cbb631212..07ac208edc89 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += early_ioremap.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += qspinlock.h
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 015ddffaf6ca..b56a4b2bc248 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -17,7 +17,7 @@
#include <asm/lse.h>
#define ATOMIC_OP(op) \
-static inline void arch_##op(int i, atomic_t *v) \
+static __always_inline void arch_##op(int i, atomic_t *v) \
{ \
__lse_ll_sc_body(op, i, v); \
}
@@ -32,7 +32,7 @@ ATOMIC_OP(atomic_sub)
#undef ATOMIC_OP
#define ATOMIC_FETCH_OP(name, op) \
-static inline int arch_##op##name(int i, atomic_t *v) \
+static __always_inline int arch_##op##name(int i, atomic_t *v) \
{ \
return __lse_ll_sc_body(op##name, i, v); \
}
@@ -56,7 +56,7 @@ ATOMIC_FETCH_OPS(atomic_sub_return)
#undef ATOMIC_FETCH_OPS
#define ATOMIC64_OP(op) \
-static inline void arch_##op(long i, atomic64_t *v) \
+static __always_inline void arch_##op(long i, atomic64_t *v) \
{ \
__lse_ll_sc_body(op, i, v); \
}
@@ -71,7 +71,7 @@ ATOMIC64_OP(atomic64_sub)
#undef ATOMIC64_OP
#define ATOMIC64_FETCH_OP(name, op) \
-static inline long arch_##op##name(long i, atomic64_t *v) \
+static __always_inline long arch_##op##name(long i, atomic64_t *v) \
{ \
return __lse_ll_sc_body(op##name, i, v); \
}
@@ -94,7 +94,7 @@ ATOMIC64_FETCH_OPS(atomic64_sub_return)
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS
-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
+static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
return __lse_ll_sc_body(atomic64_dec_if_positive, v);
}
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 11beda85ee7e..8fcfab0c2567 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -17,6 +17,7 @@
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
+#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
@@ -240,6 +241,28 @@ struct kvm_host_data {
struct kvm_pmu_events pmu_events;
};
+struct kvm_host_psci_config {
+ /* PSCI version used by host. */
+ u32 version;
+
+ /* Function IDs used by host if version is v0.1. */
+ struct psci_0_1_function_ids function_ids_0_1;
+
+ bool psci_0_1_cpu_suspend_implemented;
+ bool psci_0_1_cpu_on_implemented;
+ bool psci_0_1_cpu_off_implemented;
+ bool psci_0_1_migrate_implemented;
+};
+
+extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
+#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
+
+extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
+#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)
+
+extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
+#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
+
struct vcpu_reset_state {
unsigned long pc;
unsigned long r0;
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 6f986e09a781..f0fe0cc6abe0 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -176,10 +176,21 @@ static inline void __uaccess_enable_hw_pan(void)
* The Tag check override (TCO) bit disables temporarily the tag checking
* preventing the issue.
*/
-static inline void uaccess_disable_privileged(void)
+static inline void __uaccess_disable_tco(void)
{
asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+static inline void __uaccess_enable_tco(void)
+{
+ asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
+ ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+static inline void uaccess_disable_privileged(void)
+{
+ __uaccess_disable_tco();
if (uaccess_ttbr0_disable())
return;
@@ -189,8 +200,7 @@ static inline void uaccess_disable_privileged(void)
static inline void uaccess_enable_privileged(void)
{
- asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
- ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+ __uaccess_enable_tco();
if (uaccess_ttbr0_enable())
return;
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index f42fd9e33981..301784463587 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -75,7 +75,7 @@ int main(void)
DEFINE(S_SDEI_TTBR1, offsetof(struct pt_regs, sdei_ttbr1));
DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
- DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
+ DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
BLANK();
#ifdef CONFIG_COMPAT
DEFINE(COMPAT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0));
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 7ffb5f1d8b68..e99eddec0a46 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2568,7 +2568,7 @@ static void verify_hyp_capabilities(void)
int parange, ipa_max;
unsigned int safe_vmid_bits, vmid_bits;
- if (!IS_ENABLED(CONFIG_KVM) || !IS_ENABLED(CONFIG_KVM_ARM_HOST))
+ if (!IS_ENABLED(CONFIG_KVM))
return;
safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index a338f40e64d3..b3e4f9a088b1 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -35,7 +35,7 @@
*/
.macro ftrace_regs_entry, allregs=0
/* Make room for pt_regs, plus a callee frame */
- sub sp, sp, #(S_FRAME_SIZE + 16)
+ sub sp, sp, #(PT_REGS_SIZE + 16)
/* Save function arguments (and x9 for simplicity) */
stp x0, x1, [sp, #S_X0]
@@ -61,15 +61,15 @@
.endif
/* Save the callsite's SP and LR */
- add x10, sp, #(S_FRAME_SIZE + 16)
+ add x10, sp, #(PT_REGS_SIZE + 16)
stp x9, x10, [sp, #S_LR]
/* Save the PC after the ftrace callsite */
str x30, [sp, #S_PC]
/* Create a frame record for the callsite above pt_regs */
- stp x29, x9, [sp, #S_FRAME_SIZE]
- add x29, sp, #S_FRAME_SIZE
+ stp x29, x9, [sp, #PT_REGS_SIZE]
+ add x29, sp, #PT_REGS_SIZE
/* Create our frame record within pt_regs. */
stp x29, x30, [sp, #S_STACKFRAME]
@@ -120,7 +120,7 @@ ftrace_common_return:
ldr x9, [sp, #S_PC]
/* Restore the callsite's SP */
- add sp, sp, #S_FRAME_SIZE + 16
+ add sp, sp, #PT_REGS_SIZE + 16
ret x9
SYM_CODE_END(ftrace_common)
@@ -130,7 +130,7 @@ SYM_CODE_START(ftrace_graph_caller)
ldr x0, [sp, #S_PC]
sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
add x1, sp, #S_LR // parent_ip (callsite's LR)
- ldr x2, [sp, #S_FRAME_SIZE] // parent fp (callsite's FP)
+ ldr x2, [sp, #PT_REGS_SIZE] // parent fp (callsite's FP)
bl prepare_ftrace_return
b ftrace_common_return
SYM_CODE_END(ftrace_graph_caller)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 2a93fa5f4e49..c9bae73f2621 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -75,7 +75,7 @@ alternative_else_nop_endif
.endif
#endif
- sub sp, sp, #S_FRAME_SIZE
+ sub sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
/*
* Test whether the SP has overflowed, without corrupting a GPR.
@@ -96,7 +96,7 @@ alternative_else_nop_endif
* userspace, and can clobber EL0 registers to free up GPRs.
*/
- /* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
+ /* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
msr tpidr_el0, x0
/* Recover the original x0 value and stash it in tpidrro_el0 */
@@ -182,7 +182,6 @@ alternative_else_nop_endif
mrs_s \tmp2, SYS_GCR_EL1
bfi \tmp2, \tmp, #0, #16
msr_s SYS_GCR_EL1, \tmp2
- isb
#endif
.endm
@@ -194,6 +193,7 @@ alternative_else_nop_endif
ldr_l \tmp, gcr_kernel_excl
mte_set_gcr \tmp, \tmp2
+ isb
1:
#endif
.endm
@@ -253,7 +253,7 @@ alternative_else_nop_endif
scs_load tsk, x20
.else
- add x21, sp, #S_FRAME_SIZE
+ add x21, sp, #PT_REGS_SIZE
get_current_task tsk
.endif /* \el == 0 */
mrs x22, elr_el1
@@ -377,7 +377,7 @@ alternative_else_nop_endif
ldp x26, x27, [sp, #16 * 13]
ldp x28, x29, [sp, #16 * 14]
ldr lr, [sp, #S_LR]
- add sp, sp, #S_FRAME_SIZE // restore sp
+ add sp, sp, #PT_REGS_SIZE // restore sp
.if \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
@@ -580,12 +580,12 @@ __bad_stack:
/*
* Store the original GPRs to the new stack. The orginal SP (minus
- * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
+ * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
*/
- sub sp, sp, #S_FRAME_SIZE
+ sub sp, sp, #PT_REGS_SIZE
kernel_entry 1
mrs x0, tpidr_el0
- add x0, x0, #S_FRAME_SIZE
+ add x0, x0, #PT_REGS_SIZE
str x0, [sp, #S_SP]
/* Stash the regs for handle_bad_stack */
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 38bb07eff872..3605f77ad4df 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -23,8 +23,6 @@
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>
-#include <linux/nmi.h>
-#include <linux/cpufreq.h>
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
@@ -1250,21 +1248,10 @@ static struct platform_driver armv8_pmu_driver = {
static int __init armv8_pmu_driver_init(void)
{
- int ret;
-
if (acpi_disabled)
- ret = platform_driver_register(&armv8_pmu_driver);
+ return platform_driver_register(&armv8_pmu_driver);
else
- ret = arm_pmu_acpi_probe(armv8_pmuv3_init);
-
- /*
- * Try to re-initialize lockup detector after PMU init in
- * case PMU events are triggered via NMIs.
- */
- if (ret == 0 && arm_pmu_irq_is_nmi())
- lockup_detector_init();
-
- return ret;
+ return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)
@@ -1322,27 +1309,3 @@ void arch_perf_update_userpage(struct perf_event *event,
userpg->cap_user_time_zero = 1;
userpg->cap_user_time_short = 1;
}
-
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
-/*
- * Safe maximum CPU frequency in case a particular platform doesn't implement
- * cpufreq driver. Although, architecture doesn't put any restrictions on
- * maximum frequency but 5 GHz seems to be safe maximum given the available
- * Arm CPUs in the market which are clocked much less than 5 GHz. On the other
- * hand, we can't make it much higher as it would lead to a large hard-lockup
- * detection timeout on parts which are running slower (eg. 1GHz on
- * Developerbox) and doesn't possess a cpufreq driver.
- */
-#define SAFE_MAX_CPU_FREQ 5000000000UL // 5 GHz
-u64 hw_nmi_get_sample_period(int watchdog_thresh)
-{
- unsigned int cpu = smp_processor_id();
- unsigned long max_cpu_freq;
-
- max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
- if (!max_cpu_freq)
- max_cpu_freq = SAFE_MAX_CPU_FREQ;
-
- return (u64)max_cpu_freq * watchdog_thresh;
-}
-#endif
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 89c64ada8732..66aac2881ba8 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -352,8 +352,8 @@ kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
unsigned long addr = instruction_pointer(regs);
struct kprobe *cur = kprobe_running();
- if (cur && (kcb->kprobe_status == KPROBE_HIT_SS)
- && ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
+ if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
+ ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
kprobes_restore_local_irqflag(kcb, regs);
post_kprobe_handler(cur, kcb, regs);
diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S
index 890ca72c5a51..288a84e253cc 100644
--- a/arch/arm64/kernel/probes/kprobes_trampoline.S
+++ b/arch/arm64/kernel/probes/kprobes_trampoline.S
@@ -25,7 +25,7 @@
stp x24, x25, [sp, #S_X24]
stp x26, x27, [sp, #S_X26]
stp x28, x29, [sp, #S_X28]
- add x0, sp, #S_FRAME_SIZE
+ add x0, sp, #PT_REGS_SIZE
stp lr, x0, [sp, #S_LR]
/*
* Construct a useful saved PSTATE
@@ -62,7 +62,7 @@
.endm
SYM_CODE_START(kretprobe_trampoline)
- sub sp, sp, #S_FRAME_SIZE
+ sub sp, sp, #PT_REGS_SIZE
save_all_base_regs
@@ -76,7 +76,7 @@ SYM_CODE_START(kretprobe_trampoline)
restore_all_base_regs
- add sp, sp, #S_FRAME_SIZE
+ add sp, sp, #PT_REGS_SIZE
ret
SYM_CODE_END(kretprobe_trampoline)
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index f71d6ce4673f..6237486ff6bb 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -914,13 +914,6 @@ static void do_signal(struct pt_regs *regs)
asmlinkage void do_notify_resume(struct pt_regs *regs,
unsigned long thread_flags)
{
- /*
- * The assembly code enters us with IRQs off, but it hasn't
- * informed the tracing code of that for efficiency reasons.
- * Update the trace code with the current status.
- */
- trace_hardirqs_off();
-
do {
if (thread_flags & _TIF_NEED_RESCHED) {
/* Unmask Debug and SError for the next task */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 6bc3a3698c3d..ad00f99ee9b0 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -434,7 +434,7 @@ static void __init hyp_mode_check(void)
"CPU: CPUs started in inconsistent modes");
else
pr_info("CPU: All CPU(s) started at EL1\n");
- if (IS_ENABLED(CONFIG_KVM))
+ if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode())
kvm_compute_layout();
}
@@ -807,7 +807,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
- unsigned int irq = irq_desc_get_irq(ipi_desc[i]);
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index f61e9d8cc55a..c2877c332f2d 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -9,6 +9,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
+#include <asm/exception.h>
#include <asm/fpsimd.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
@@ -165,15 +166,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
local_daif_mask();
flags = current_thread_info()->flags;
- if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) {
- /*
- * We're off to userspace, where interrupts are
- * always enabled after we restore the flags from
- * the SPSR.
- */
- trace_hardirqs_on();
+ if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
return;
- }
local_daif_restore(DAIF_PROCCTX);
}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 08156be75569..6895ce777e7f 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -42,7 +42,6 @@
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
-#include <asm/exception.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index a8f8e409e2bf..cd9c3fa25902 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -24,8 +24,7 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
# routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
# preparation in build-time C")).
ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
- -Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id=sha1 -n \
- $(btildflags-y) -T
+ -Bsymbolic --build-id=sha1 -n $(btildflags-y) -T
ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
index d808ad31e01f..61dbb4c838ef 100644
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -40,9 +40,6 @@ SECTIONS
PROVIDE (_etext = .);
PROVIDE (etext = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
- .eh_frame : { KEEP (*(.eh_frame)) } :text
-
.dynamic : { *(.dynamic) } :text :dynamic
.rodata : { *(.rodata*) } :text
@@ -54,6 +51,7 @@ SECTIONS
*(.note.GNU-stack)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
+ *(.eh_frame .eh_frame_hdr)
}
}
@@ -66,7 +64,6 @@ PHDRS
text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
note PT_NOTE FLAGS(4); /* PF_R */
- eh_frame_hdr PT_GNU_EH_FRAME;
}
/*
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 043756db8f6e..3964acf5451e 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -49,14 +49,6 @@ if KVM
source "virt/kvm/Kconfig"
-config KVM_ARM_PMU
- bool "Virtual Performance Monitoring Unit (PMU) support"
- depends on HW_PERF_EVENTS
- default y
- help
- Adds support for a virtual Performance Monitoring Unit (PMU) in
- virtual machines.
-
endif # KVM
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 60fd181df624..13b017284bf9 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -24,4 +24,4 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
vgic/vgic-its.o vgic/vgic-debug.o
-kvm-$(CONFIG_KVM_ARM_PMU) += pmu-emul.o
+kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 32ba6fbc3814..74e0699661e9 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -1129,9 +1129,10 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
if (!irqchip_in_kernel(vcpu->kvm))
goto no_vgic;
- if (!vgic_initialized(vcpu->kvm))
- return -ENODEV;
-
+ /*
+ * At this stage, we have the guarantee that the vgic is both
+ * available and initialized.
+ */
if (!timer_irqs_are_valid(vcpu)) {
kvm_debug("incorrectly configured timer irqs\n");
return -EINVAL;
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 6e637d2b4cfb..04c44853b103 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -65,10 +65,6 @@ static bool vgic_present;
static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
-extern u64 kvm_nvhe_sym(__cpu_logical_map)[NR_CPUS];
-extern u32 kvm_nvhe_sym(kvm_host_psci_version);
-extern struct psci_0_1_function_ids kvm_nvhe_sym(kvm_host_psci_0_1_function_ids);
-
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -584,11 +580,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* Map the VGIC hardware resources before running a vcpu the
* first time on this VM.
*/
- if (unlikely(!vgic_ready(kvm))) {
- ret = kvm_vgic_map_resources(kvm);
- if (ret)
- return ret;
- }
+ ret = kvm_vgic_map_resources(kvm);
+ if (ret)
+ return ret;
} else {
/*
* Tell the rest of the code that there are userspace irqchip
@@ -1574,12 +1568,12 @@ static struct notifier_block hyp_init_cpu_pm_nb = {
.notifier_call = hyp_init_cpu_pm_notifier,
};
-static void __init hyp_cpu_pm_init(void)
+static void hyp_cpu_pm_init(void)
{
if (!is_protected_kvm_enabled())
cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}
-static void __init hyp_cpu_pm_exit(void)
+static void hyp_cpu_pm_exit(void)
{
if (!is_protected_kvm_enabled())
cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
@@ -1604,9 +1598,12 @@ static void init_cpu_logical_map(void)
* allow any other CPUs from the `possible` set to boot.
*/
for_each_online_cpu(cpu)
- kvm_nvhe_sym(__cpu_logical_map)[cpu] = cpu_logical_map(cpu);
+ hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
}
+#define init_psci_0_1_impl_state(config, what) \
+ config.psci_0_1_ ## what ## _implemented = psci_ops.what
+
static bool init_psci_relay(void)
{
/*
@@ -1618,8 +1615,15 @@ static bool init_psci_relay(void)
return false;
}
- kvm_nvhe_sym(kvm_host_psci_version) = psci_ops.get_version();
- kvm_nvhe_sym(kvm_host_psci_0_1_function_ids) = get_psci_0_1_function_ids();
+ kvm_host_psci_config.version = psci_ops.get_version();
+
+ if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
+ kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
+ init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
+ init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
+ init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
+ init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
+ }
return true;
}
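
The init_psci_0_1_impl_state() macro added above records which PSCI 0.1 calls the host firmware actually implements by pasting the function name into both the config field and the psci_ops member. A minimal standalone sketch of the same token-pasting pattern, with invented struct and field names, might look like this:

/* Standalone sketch of the ##-pasting pattern; names are invented. */
#include <stdbool.h>
#include <stdio.h>

struct ops { void (*cpu_on)(void); void (*cpu_off)(void); };
struct cfg { bool psci_0_1_cpu_on_implemented; bool psci_0_1_cpu_off_implemented; };

#define init_impl_state(config, ops, what) \
	((config).psci_0_1_ ## what ## _implemented = ((ops).what != NULL))

static void dummy_cpu_on(void) { }

int main(void)
{
	struct ops psci_ops = { .cpu_on = dummy_cpu_on, .cpu_off = NULL };
	struct cfg c = { 0 };

	init_impl_state(c, psci_ops, cpu_on);	/* expands to ...cpu_on_implemented = ... */
	init_impl_state(c, psci_ops, cpu_off);
	printf("%d %d\n", c.psci_0_1_cpu_on_implemented, c.psci_0_1_cpu_off_implemented);
	return 0;
}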
diff --git a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
index b1f60923a8fe..61716359035d 100644
--- a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
+++ b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
@@ -59,4 +59,13 @@ static inline void __adjust_pc(struct kvm_vcpu *vcpu)
}
}
+/*
+ * Skip an instruction while host sysregs are live.
+ * Assumes host is always 64-bit.
+ */
+static inline void kvm_skip_host_instr(void)
+{
+ write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
+}
+
#endif
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index bde658d51404..a906f9e2ff34 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -157,11 +157,6 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
__kvm_hyp_host_forward_smc(host_ctxt);
}
-static void skip_host_instruction(void)
-{
- write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
-}
-
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
bool handled;
@@ -170,11 +165,8 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
if (!handled)
default_host_smc_handler(host_ctxt);
- /*
- * Unlike HVC, the return address of an SMC is the instruction's PC.
- * Move the return address past the instruction.
- */
- skip_host_instruction();
+ /* SMC was trapped, move ELR past the current PC. */
+ kvm_skip_host_instr();
}
void handle_trap(struct kvm_cpu_context *host_ctxt)
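
For context on the hyp-main.c hunk above: AArch64 instructions are a fixed 4 bytes, and the reported return address of a trapped SMC is the SMC itself, so the handler advances the saved ELR by one instruction before returning to the host. A rough userspace sketch of that adjustment (names invented, not kernel code):

/* Illustrative sketch only: mirrors the effect of kvm_skip_host_instr(). */
#include <stdint.h>
#include <stdio.h>

#define AARCH64_INSN_SIZE 4

struct fake_host_state {
	uint64_t elr;	/* saved exception link register (return address) */
};

static void skip_host_instr(struct fake_host_state *st)
{
	st->elr += AARCH64_INSN_SIZE;	/* ELR_EL2 += 4 in the real code */
}

int main(void)
{
	struct fake_host_state st = { .elr = 0x40001000 };

	/* For SMC, ELR points at the SMC itself; move it past the instruction. */
	skip_host_instr(&st);
	printf("resume at 0x%llx\n", (unsigned long long)st.elr);
	return 0;
}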
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
index cbab0c6246e2..2997aa156d8e 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
@@ -14,14 +14,14 @@
* Other CPUs should not be allowed to boot because their features were
* not checked against the finalized system capabilities.
*/
-u64 __ro_after_init __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+u64 __ro_after_init hyp_cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
u64 cpu_logical_map(unsigned int cpu)
{
- if (cpu >= ARRAY_SIZE(__cpu_logical_map))
+ if (cpu >= ARRAY_SIZE(hyp_cpu_logical_map))
hyp_panic();
- return __cpu_logical_map[cpu];
+ return hyp_cpu_logical_map[cpu];
}
unsigned long __hyp_per_cpu_offset(unsigned int cpu)
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 08dc9de69314..e3947846ffcb 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -7,11 +7,8 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
-#include <kvm/arm_hypercalls.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
-#include <linux/psci.h>
-#include <kvm/arm_psci.h>
#include <uapi/linux/psci.h>
#include <nvhe/trap_handler.h>
@@ -22,9 +19,8 @@ void kvm_hyp_cpu_resume(unsigned long r0);
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
/* Config options set by the host. */
-__ro_after_init u32 kvm_host_psci_version;
-__ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids;
-__ro_after_init s64 hyp_physvirt_offset;
+struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
+s64 __ro_after_init hyp_physvirt_offset;
#define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
@@ -47,19 +43,16 @@ struct psci_boot_args {
static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;
-static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
-{
- DECLARE_REG(u64, func_id, host_ctxt, 0);
-
- return func_id;
-}
+#define is_psci_0_1(what, func_id) \
+ (kvm_host_psci_config.psci_0_1_ ## what ## _implemented && \
+ (func_id) == kvm_host_psci_config.function_ids_0_1.what)
static bool is_psci_0_1_call(u64 func_id)
{
- return (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend) ||
- (func_id == kvm_host_psci_0_1_function_ids.cpu_on) ||
- (func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
- (func_id == kvm_host_psci_0_1_function_ids.migrate);
+ return (is_psci_0_1(cpu_suspend, func_id) ||
+ is_psci_0_1(cpu_on, func_id) ||
+ is_psci_0_1(cpu_off, func_id) ||
+ is_psci_0_1(migrate, func_id));
}
static bool is_psci_0_2_call(u64 func_id)
@@ -69,16 +62,6 @@ static bool is_psci_0_2_call(u64 func_id)
(PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
}
-static bool is_psci_call(u64 func_id)
-{
- switch (kvm_host_psci_version) {
- case PSCI_VERSION(0, 1):
- return is_psci_0_1_call(func_id);
- default:
- return is_psci_0_2_call(func_id);
- }
-}
-
static unsigned long psci_call(unsigned long fn, unsigned long arg0,
unsigned long arg1, unsigned long arg2)
{
@@ -248,15 +231,14 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
- if ((func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
- (func_id == kvm_host_psci_0_1_function_ids.migrate))
+ if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
return psci_forward(host_ctxt);
- else if (func_id == kvm_host_psci_0_1_function_ids.cpu_on)
+ if (is_psci_0_1(cpu_on, func_id))
return psci_cpu_on(func_id, host_ctxt);
- else if (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend)
+ if (is_psci_0_1(cpu_suspend, func_id))
return psci_cpu_suspend(func_id, host_ctxt);
- else
- return PSCI_RET_NOT_SUPPORTED;
+
+ return PSCI_RET_NOT_SUPPORTED;
}
static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
@@ -298,20 +280,23 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
{
- u64 func_id = get_psci_func_id(host_ctxt);
+ DECLARE_REG(u64, func_id, host_ctxt, 0);
unsigned long ret;
- if (!is_psci_call(func_id))
- return false;
-
- switch (kvm_host_psci_version) {
+ switch (kvm_host_psci_config.version) {
case PSCI_VERSION(0, 1):
+ if (!is_psci_0_1_call(func_id))
+ return false;
ret = psci_0_1_handler(func_id, host_ctxt);
break;
case PSCI_VERSION(0, 2):
+ if (!is_psci_0_2_call(func_id))
+ return false;
ret = psci_0_2_handler(func_id, host_ctxt);
break;
default:
+ if (!is_psci_0_2_call(func_id))
+ return false;
ret = psci_1_0_handler(func_id, host_ctxt);
break;
}
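
The psci-relay.c changes above only treat a function ID as a PSCI 0.1 call when the host advertised an implementation for it, so an unimplemented call whose ID field was left at zero can no longer match a stray zero argument. A simplified userspace sketch of that check, with stand-in names for the kvm_host_psci_config fields:

/* Simplified sketch; struct and ID values are illustrative stand-ins. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct psci_0_1_ids {
	uint32_t cpu_on;
	uint32_t cpu_off;
	bool cpu_on_implemented;
	bool cpu_off_implemented;
};

static bool is_psci_0_1_cpu_on(const struct psci_0_1_ids *cfg, uint64_t func_id)
{
	/* only a match if the host implements it AND the ID agrees */
	return cfg->cpu_on_implemented && func_id == cfg->cpu_on;
}

int main(void)
{
	struct psci_0_1_ids cfg = {
		.cpu_on = 0x84000003, .cpu_on_implemented = true,
		.cpu_off = 0,         .cpu_off_implemented = false,
	};

	printf("%d\n", is_psci_0_1_cpu_on(&cfg, 0x84000003)); /* 1: implemented and matching */
	printf("%d\n", is_psci_0_1_cpu_on(&cfg, 0));          /* 0: zero no longer matches */
	return 0;
}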
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 398f6df1bbe4..4ad66a532e38 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -850,8 +850,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
- kvm_pmu_vcpu_reset(vcpu);
-
return 0;
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3313dedfa505..42ccc27fb684 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -594,6 +594,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 pmcr, val;
+ /* No PMU available, PMCR_EL0 may UNDEF... */
+ if (!kvm_arm_support_pmu_v3())
+ return;
+
pmcr = read_sysreg(pmcr_el0);
/*
* Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
@@ -919,7 +923,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
#define reg_to_encoding(x) \
sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
- (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
+ (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index d8cc51bd60bf..70fcd6a12fe1 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -34,17 +34,16 @@ static u64 __early_kern_hyp_va(u64 addr)
}
/*
- * Store a hyp VA <-> PA offset into a hyp-owned variable.
+ * Store a hyp VA <-> PA offset into an EL2-owned variable.

*/
static void init_hyp_physvirt_offset(void)
{
- extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
u64 kern_va, hyp_va;
/* Compute the offset from the hyp VA and PA of a random symbol. */
- kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
+ kern_va = (u64)lm_alias(__hyp_text_start);
hyp_va = __early_kern_hyp_va(kern_va);
- CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
+ hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
}
/*
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 32e32d67a127..052917deb149 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -419,7 +419,8 @@ int vgic_lazy_init(struct kvm *kvm)
* Map the MMIO regions depending on the VGIC model exposed to the guest
* called on the first VCPU run.
* Also map the virtual CPU interface into the VM.
- * v2/v3 derivatives call vgic_init if not already done.
+ * v2 calls vgic_init() if not already done.
+ * v3 and derivatives return an error if the VGIC is not initialized.
* vgic_ready() returns true if this function has succeeded.
* @kvm: kvm struct pointer
*/
@@ -428,7 +429,13 @@ int kvm_vgic_map_resources(struct kvm *kvm)
struct vgic_dist *dist = &kvm->arch.vgic;
int ret = 0;
+ if (likely(vgic_ready(kvm)))
+ return 0;
+
mutex_lock(&kvm->lock);
+ if (vgic_ready(kvm))
+ goto out;
+
if (!irqchip_in_kernel(kvm))
goto out;
@@ -439,6 +446,8 @@ int kvm_vgic_map_resources(struct kvm *kvm)
if (ret)
__kvm_vgic_destroy(kvm);
+ else
+ dist->ready = true;
out:
mutex_unlock(&kvm->lock);
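
The kvm_vgic_map_resources() change above is a double-checked, mutex-protected one-time initialisation: a lock-free vgic_ready() fast path, a re-check under kvm->lock, and dist->ready set only on success. A minimal pthread sketch of the same pattern (simplified, and glossing over the memory-ordering subtleties of the unlocked read):

/* Minimal sketch of double-checked one-time init; not the kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool ready;

static int do_map_resources(void)
{
	return 0;	/* placeholder for the real mapping work */
}

static int map_resources_once(void)
{
	int ret = 0;

	if (ready)			/* fast path: already mapped */
		return 0;

	pthread_mutex_lock(&lock);
	if (ready)			/* lost the race: someone else mapped it */
		goto out;

	ret = do_map_resources();
	if (!ret)
		ready = true;		/* only mark ready on success */
out:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("%d %d\n", map_resources_once(), map_resources_once());
	return 0;
}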
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index ebf53a4e1296..11934c2af2f4 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -306,20 +306,15 @@ int vgic_v2_map_resources(struct kvm *kvm)
struct vgic_dist *dist = &kvm->arch.vgic;
int ret = 0;
- if (vgic_ready(kvm))
- goto out;
-
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
kvm_err("Need to set vgic cpu and dist addresses first\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
kvm_err("VGIC CPU and dist frames overlap\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/*
@@ -329,13 +324,13 @@ int vgic_v2_map_resources(struct kvm *kvm)
ret = vgic_init(kvm);
if (ret) {
kvm_err("Unable to initialize VGIC dynamic data structures\n");
- goto out;
+ return ret;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
if (ret) {
kvm_err("Unable to register VGIC MMIO regions\n");
- goto out;
+ return ret;
}
if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
@@ -344,14 +339,11 @@ int vgic_v2_map_resources(struct kvm *kvm)
KVM_VGIC_V2_CPU_SIZE, true);
if (ret) {
kvm_err("Unable to remap VGIC CPU to VCPU\n");
- goto out;
+ return ret;
}
}
- dist->ready = true;
-
-out:
- return ret;
+ return 0;
}
DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 9cdf39a94a63..52915b342351 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -500,29 +500,23 @@ int vgic_v3_map_resources(struct kvm *kvm)
int ret = 0;
int c;
- if (vgic_ready(kvm))
- goto out;
-
kvm_for_each_vcpu(c, vcpu, kvm) {
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
kvm_debug("vcpu %d redistributor base not set\n", c);
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
}
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
kvm_err("Need to set vgic distributor addresses first\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
if (!vgic_v3_check_base(kvm)) {
kvm_err("VGIC redist and dist frames overlap\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/*
@@ -530,22 +524,19 @@ int vgic_v3_map_resources(struct kvm *kvm)
* the VGIC before we need to use it.
*/
if (!vgic_initialized(kvm)) {
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
if (ret) {
kvm_err("Unable to register VGICv3 dist MMIO regions\n");
- goto out;
+ return ret;
}
if (kvm_vgic_global_state.has_gicv4_1)
vgic_v4_configure_vsgis(kvm);
- dist->ready = true;
-out:
- return ret;
+ return 0;
}
DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 3c40da479899..35d75c60e2b8 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -709,10 +709,11 @@ static int do_tag_check_fault(unsigned long far, unsigned int esr,
struct pt_regs *regs)
{
/*
- * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN for tag
- * check faults. Mask them out now so that userspace doesn't see them.
+ * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN
+ * for tag check faults. Set them to the corresponding bits in the untagged
+ * address.
*/
- far &= (1UL << 60) - 1;
+ far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
do_bad_area(far, esr, regs);
return 0;
}
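
A small userspace illustration of the FAR_EL1 fix-up above, assuming the arm64 definitions that MTE_TAG_MASK covers bits 59:56 and __untagged_addr() sign-extends the address from bit 55; the UNKNOWN garbage in bits 63:60 is replaced while the MTE tag is preserved:

/* Illustrative sketch under the assumptions stated above; not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define MTE_TAG_MASK	(0xfULL << 56)		/* assumed: tag lives in bits 59:56 */

static uint64_t untagged(uint64_t addr)
{
	/* assumed: sign-extend from bit 55, like __untagged_addr() */
	return (uint64_t)(((int64_t)(addr << 8)) >> 8);
}

int main(void)
{
	uint64_t far = 0x5400007fdeadbeefULL;	/* bits 63:60 hold UNKNOWN garbage */
	uint64_t fixed;

	/* keep the MTE tag (59:56); take every other bit from the untagged address */
	fixed = (untagged(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
	printf("%016llx\n", (unsigned long long)fixed);	/* 0400007fdeadbeef */
	return 0;
}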
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 75addb36354a..709d98fea90c 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -53,13 +53,13 @@ s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
/*
- * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
- * memory as some devices, namely the Raspberry Pi 4, have peripherals with
- * this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
- * bit addressable memory area.
+ * If the corresponding config options are enabled, we create both ZONE_DMA
+ * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
+ * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
+ * In such a case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
+ * otherwise it is empty.
*/
phys_addr_t arm64_dma_phys_limit __ro_after_init;
-static phys_addr_t arm64_dma32_phys_limit __ro_after_init;
#ifdef CONFIG_KEXEC_CORE
/*
@@ -84,7 +84,7 @@ static void __init reserve_crashkernel(void)
if (crash_base == 0) {
/* Current arm64 boot protocol requires 2MB alignment */
- crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
+ crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
crash_size, SZ_2M);
if (crash_base == 0) {
pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
@@ -196,6 +196,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
unsigned int __maybe_unused acpi_zone_dma_bits;
unsigned int __maybe_unused dt_zone_dma_bits;
+ phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);
#ifdef CONFIG_ZONE_DMA
acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
@@ -205,8 +206,12 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
+ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
+ if (!arm64_dma_phys_limit)
+ arm64_dma_phys_limit = dma32_phys_limit;
#endif
+ if (!arm64_dma_phys_limit)
+ arm64_dma_phys_limit = PHYS_MASK + 1;
max_zone_pfns[ZONE_NORMAL] = max;
free_area_init(max_zone_pfns);
@@ -394,16 +399,9 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
- if (IS_ENABLED(CONFIG_ZONE_DMA32))
- arm64_dma32_phys_limit = max_zone_phys(32);
- else
- arm64_dma32_phys_limit = PHYS_MASK + 1;
-
reserve_elfcorehdr();
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
-
- dma_contiguous_reserve(arm64_dma32_phys_limit);
}
void __init bootmem_init(void)
@@ -439,6 +437,11 @@ void __init bootmem_init(void)
zone_sizes_init(min, max);
/*
+ * Reserve the CMA area after arm64_dma_phys_limit was initialised.
+ */
+ dma_contiguous_reserve(arm64_dma_phys_limit);
+
+ /*
* request_standard_resources() depends on crashkernel's memory being
* reserved, so do it here.
*/
@@ -455,7 +458,7 @@ void __init bootmem_init(void)
void __init mem_init(void)
{
if (swiotlb_force == SWIOTLB_FORCE ||
- max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
+ max_pfn > PFN_DOWN(arm64_dma_phys_limit))
swiotlb_init(1);
else
swiotlb_force = SWIOTLB_NO_FORCE;
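
Condensing the arm64/mm/init.c changes above: arm64_dma_phys_limit now falls back from the ZONE_DMA limit, to the 32-bit limit, to the full physical range, and both CMA reservation and the swiotlb decision key off that single value. A toy sketch of the selection order (the constants are illustrative only):

/* Toy sketch of the fallback chain; limits and PA size are made-up values. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	bool zone_dma = true, zone_dma32 = true;	/* stand-ins for the config options */
	uint64_t zone_dma_limit = 1ULL << 30;		/* e.g. 30-bit limit on a RPi4-like system */
	uint64_t dma32_limit = 1ULL << 32;
	uint64_t phys_range_end = 1ULL << 48;		/* assumed PA range */
	uint64_t dma_phys_limit = 0;

	if (zone_dma)
		dma_phys_limit = zone_dma_limit;
	if (zone_dma32 && !dma_phys_limit)
		dma_phys_limit = dma32_limit;
	if (!dma_phys_limit)
		dma_phys_limit = phys_range_end;

	printf("CMA reserved below %#llx\n", (unsigned long long)dma_phys_limit);
	return 0;
}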
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 37a54b57178a..1f7ee8c8b7b8 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -46,7 +46,7 @@
#endif
#ifdef CONFIG_KASAN_HW_TAGS
-#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1
+#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_HW_FLAGS 0
#endif
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index 93372255984d..cc24bb8e539f 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -2,7 +2,6 @@
generic-y += asm-offsets.h
generic-y += gpio.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += qrwlock.h
generic-y += user.h
generic-y += vmlinux.lds.h
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index ddf04f32b546..60ee7f0d60a8 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -2,7 +2,6 @@
generic-y += asm-offsets.h
generic-y += extable.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += spinlock.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 373964bb177e..3ece3c93fe08 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -2,5 +2,4 @@
generic-y += extable.h
generic-y += iomap.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
diff --git a/arch/ia64/include/asm/local64.h b/arch/ia64/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/ia64/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/ia64/include/asm/sparsemem.h b/arch/ia64/include/asm/sparsemem.h
index dd8c166ffd7b..42ed5248fae9 100644
--- a/arch/ia64/include/asm/sparsemem.h
+++ b/arch/ia64/include/asm/sparsemem.h
@@ -3,6 +3,7 @@
#define _ASM_IA64_SPARSEMEM_H
#ifdef CONFIG_SPARSEMEM
+#include <asm/page.h>
/*
* SECTION_SIZE_BITS 2^N: how big each section will be
* MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 9b5acf8fb092..e76386a3479e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -536,7 +536,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
if (map_start < map_end)
memmap_init_zone((unsigned long)(map_end - map_start),
- args->nid, args->zone, page_to_pfn(map_start),
+ args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end),
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
return 0;
}
@@ -546,7 +546,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn)
{
if (!vmem_map) {
- memmap_init_zone(size, nid, zone, start_pfn,
+ memmap_init_zone(size, nid, zone, start_pfn, start_pfn + size,
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
} else {
struct page *start;
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 1bff55aa2d54..0dbf9c5c6fae 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -2,6 +2,5 @@
generated-y += syscall_table.h
generic-y += extable.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += spinlock.h
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 63bce836b9f1..29b0e557aa7c 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -2,7 +2,6 @@
generated-y += syscall_table.h
generic-y += extable.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += syscalls.h
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index c61c641674e6..e3946b06e840 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -13,6 +13,7 @@
#include <linux/libfdt.h>
#include <asm/addrspace.h>
+#include <asm/unaligned.h>
/*
* These two variables specify the free mem region
@@ -117,7 +118,7 @@ void decompress_kernel(unsigned long boot_heap_start)
dtb_size = fdt_totalsize((void *)&__appended_dtb);
/* last four bytes is always image size in little endian */
- image_size = le32_to_cpup((void *)&__image_end - 4);
+ image_size = get_unaligned_le32((void *)&__image_end - 4);
/* copy dtb to where the booted kernel will expect it */
memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size,
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index bd47e15d02c7..be5d4afcd30f 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1444,7 +1444,7 @@ static void octeon_irq_setup_secondary_ciu2(void)
static int __init octeon_irq_init_ciu(
struct device_node *ciu_node, struct device_node *parent)
{
- unsigned int i, r;
+ int i, r;
struct irq_chip *chip;
struct irq_chip *chip_edge;
struct irq_chip *chip_mbox;
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 198b3bafdac9..95b4fa7bd0d1 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -6,7 +6,6 @@ generated-y += syscall_table_64_n64.h
generated-y += syscall_table_64_o32.h
generic-y += export.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += qrwlock.h
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 19edf8e69971..292d0425717f 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -51,6 +51,7 @@ extern void kmap_flush_tlb(unsigned long addr);
#define flush_cache_kmaps() BUG_ON(cpu_has_dc_aliases)
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) set_pte(ptep, ptev)
#define arch_kmap_local_post_map(vaddr, pteval) local_flush_tlb_one(vaddr)
#define arch_kmap_local_post_unmap(vaddr) local_flush_tlb_one(vaddr)
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 6ee3f7218c67..c4441416e96b 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -103,4 +103,11 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
#undef ns_to_kernel_old_timeval
#define ns_to_kernel_old_timeval ns_to_old_timeval32
+/*
+ * Some data types as stored in coredump.
+ */
+#define user_long_t compat_long_t
+#define user_siginfo_t compat_siginfo_t
+#define copy_siginfo_to_external copy_siginfo_to_external32
+
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 6dd103d3cebb..7b2a23f48c1a 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -106,4 +106,11 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
#undef ns_to_kernel_old_timeval
#define ns_to_kernel_old_timeval ns_to_old_timeval32
+/*
+ * Some data types as stored in coredump.
+ */
+#define user_long_t compat_long_t
+#define user_siginfo_t compat_siginfo_t
+#define copy_siginfo_to_external copy_siginfo_to_external32
+
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index 47aeb3350a76..0e365b7c742d 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -187,8 +187,14 @@ static int __init relocate_exception_table(long offset)
static inline __init unsigned long rotate_xor(unsigned long hash,
const void *area, size_t size)
{
- size_t i;
- unsigned long *ptr = (unsigned long *)area;
+ const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
+ size_t diff, i;
+
+ diff = (void *)ptr - area;
+ if (unlikely(size < diff + sizeof(hash)))
+ return hash;
+
+ size = ALIGN_DOWN(size - diff, sizeof(hash));
for (i = 0; i < size / sizeof(hash); i++) {
/* Rotate by odd number of bits and XOR. */
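
The rotate_xor() fix above aligns the start pointer and rounds the size down so only whole machine words are read. A hedged userspace sketch of that guard follows; the rotate/XOR mixing step is a placeholder, not the kernel's exact constants:

/* Sketch of the alignment guard; the mixing loop body is a placeholder. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static unsigned long mix_aligned(unsigned long hash, const void *area, size_t size)
{
	uintptr_t aligned = ((uintptr_t)area + sizeof(hash) - 1) & ~(uintptr_t)(sizeof(hash) - 1);
	const unsigned long *ptr = (const unsigned long *)aligned;
	size_t diff = aligned - (uintptr_t)area;
	size_t i;

	/* region too small to contain even one aligned word: leave the hash untouched */
	if (size < diff + sizeof(hash))
		return hash;

	/* round down so only whole, aligned words are read (no out-of-bounds access) */
	size = (size - diff) & ~(sizeof(hash) - 1);

	for (i = 0; i < size / sizeof(hash); i++)
		hash = (hash << 7 | hash >> (8 * sizeof(hash) - 7)) ^ ptr[i];

	return hash;
}

int main(void)
{
	unsigned char buf[64] = { 1, 2, 3, 4, 5 };

	/* deliberately misaligned start and odd length */
	printf("%lx\n", mix_aligned(0, buf + 1, sizeof(buf) - 3));
	return 0;
}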
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild
index ff1e94299317..82a4453c9c2d 100644
--- a/arch/nds32/include/asm/Kbuild
+++ b/arch/nds32/include/asm/Kbuild
@@ -4,6 +4,5 @@ generic-y += cmpxchg.h
generic-y += export.h
generic-y += gpio.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += parport.h
generic-y += user.h
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 442f3d3bcd90..ca5987e11053 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += extable.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += qspinlock_types.h
generic-y += qspinlock.h
diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h
index 7d6b4a77b379..c298061c70a7 100644
--- a/arch/openrisc/include/asm/io.h
+++ b/arch/openrisc/include/asm/io.h
@@ -31,7 +31,7 @@
void __iomem *ioremap(phys_addr_t offset, unsigned long size);
#define iounmap iounmap
-extern void iounmap(void *addr);
+extern void iounmap(void __iomem *addr);
#include <asm-generic/io.h>
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 5aed97a18bac..daae13a76743 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -77,7 +77,7 @@ void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
}
EXPORT_SYMBOL(ioremap);
-void iounmap(void *addr)
+void iounmap(void __iomem *addr)
{
/* If the page is from the fixmap pool then we just clear out
* the fixmap mapping.
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index f16c4db80116..4406475a2304 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -3,6 +3,5 @@ generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += user.h
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 90cd5c53af66..e1f9b4ea1c53 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -5,7 +5,6 @@ generated-y += syscall_table_c32.h
generated-y += syscall_table_spu.h
generic-y += export.h
generic-y += kvm_types.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += vtime.h
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 1d32b174ab6a..c1a8aac01cf9 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -63,6 +63,12 @@
nop; \
nop;
+#define SCV_ENTRY_FLUSH_SLOT \
+ SCV_ENTRY_FLUSH_FIXUP_SECTION; \
+ nop; \
+ nop; \
+ nop;
+
/*
* r10 must be free to use, r13 must be paca
*/
@@ -71,6 +77,13 @@
ENTRY_FLUSH_SLOT
/*
+ * r10, ctr must be free to use, r13 must be paca
+ */
+#define SCV_INTERRUPT_TO_KERNEL \
+ STF_ENTRY_BARRIER_SLOT; \
+ SCV_ENTRY_FLUSH_SLOT
+
+/*
* Macros for annotating the expected destination of (h)rfid
*
* The nop instructions allow us to insert one or more instructions to flush the
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index f6d2acb57425..ac605fc369c4 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -240,6 +240,14 @@ label##3: \
FTR_ENTRY_OFFSET 957b-958b; \
.popsection;
+#define SCV_ENTRY_FLUSH_FIXUP_SECTION \
+957: \
+ .pushsection __scv_entry_flush_fixup,"a"; \
+ .align 2; \
+958: \
+ FTR_ENTRY_OFFSET 957b-958b; \
+ .popsection;
+
#define RFI_FLUSH_FIXUP_SECTION \
951: \
.pushsection __rfi_flush_fixup,"a"; \
@@ -273,10 +281,12 @@ label##3: \
extern long stf_barrier_fallback;
extern long entry_flush_fallback;
+extern long scv_entry_flush_fallback;
extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
+extern long __start___scv_entry_flush_fixup, __stop___scv_entry_flush_fixup;
extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 80a5ae771c65..c0fcd1bbdba9 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -58,6 +58,8 @@ extern pte_t *pkmap_page_table;
#define flush_cache_kmaps() flush_cache_all()
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) \
+ __set_pte_at(mm, vaddr, ptep, ptev, 1)
#define arch_kmap_local_post_map(vaddr, pteval) \
local_flush_tlb_page(NULL, vaddr)
#define arch_kmap_local_post_unmap(vaddr) \
diff --git a/arch/powerpc/include/asm/vdso/gettimeofday.h b/arch/powerpc/include/asm/vdso/gettimeofday.h
index 81671aa365b3..77c635c2c90d 100644
--- a/arch/powerpc/include/asm/vdso/gettimeofday.h
+++ b/arch/powerpc/include/asm/vdso/gettimeofday.h
@@ -103,6 +103,8 @@ int gettimeofday_fallback(struct __kernel_old_timeval *_tv, struct timezone *_tz
return do_syscall_2(__NR_gettimeofday, (unsigned long)_tv, (unsigned long)_tz);
}
+#ifdef __powerpc64__
+
static __always_inline
int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
@@ -115,11 +117,23 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
}
-#ifdef CONFIG_VDSO32
+#else
#define BUILD_VDSO32 1
static __always_inline
+int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ return do_syscall_2(__NR_clock_gettime64, _clkid, (unsigned long)_ts);
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ return do_syscall_2(__NR_clock_getres_time64, _clkid, (unsigned long)_ts);
+}
+
+static __always_inline
int clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
return do_syscall_2(__NR_clock_gettime, _clkid, (unsigned long)_ts);
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index aa1af139d947..33ddfeef4fe9 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -75,7 +75,7 @@ BEGIN_FTR_SECTION
bne .Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
- INTERRUPT_TO_KERNEL
+ SCV_INTERRUPT_TO_KERNEL
mr r10,r1
ld r1,PACAKSAVE(r13)
std r10,0(r1)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e02ad6fefa46..6e53f7638737 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -2993,6 +2993,25 @@ TRAMP_REAL_BEGIN(entry_flush_fallback)
ld r11,PACA_EXRFI+EX_R11(r13)
blr
+/*
+ * The SCV entry flush happens with interrupts enabled, so they must be
+ * disabled to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common).
+ * r10 (containing LR) does not need to be preserved here because scv entry
+ * puts 0 in the pt_regs; CTR can be clobbered for the same reason.
+ */
+TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
+ li r10,0
+ mtmsrd r10,1
+ lbz r10,PACAIRQHAPPENED(r13)
+ ori r10,r10,PACA_IRQ_HARD_DIS
+ stb r10,PACAIRQHAPPENED(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+ L1D_DISPLACEMENT_FLUSH
+ ld r11,PACA_EXRFI+EX_R11(r13)
+ li r10,MSR_RI
+ mtmsrd r10,1
+ blr
+
TRAMP_REAL_BEGIN(rfi_flush_fallback)
SET_SCRATCH0(r13);
GET_PACA(r13);
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 349bf3f0c3af..858fbc8b19f3 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -260,10 +260,19 @@ __secondary_hold_acknowledge:
MachineCheck:
EXCEPTION_PROLOG_0
#ifdef CONFIG_PPC_CHRP
+#ifdef CONFIG_VMAP_STACK
+ mtspr SPRN_SPRG_SCRATCH2,r1
+ mfspr r1, SPRN_SPRG_THREAD
+ lwz r1, RTAS_SP(r1)
+ cmpwi cr1, r1, 0
+ bne cr1, 7f
+ mfspr r1, SPRN_SPRG_SCRATCH2
+#else
mfspr r11, SPRN_SPRG_THREAD
lwz r11, RTAS_SP(r11)
cmpwi cr1, r11, 0
bne cr1, 7f
+#endif
#endif /* CONFIG_PPC_CHRP */
EXCEPTION_PROLOG_1 for_rtas=1
7: EXCEPTION_PROLOG_2
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 0318ba436f34..72fa3c00229a 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -85,7 +85,7 @@ SECTIONS
ALIGN_FUNCTION();
#endif
/* careful! __ftr_alt_* sections need to be close to .text */
- *(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
+ *(.text.hot .text.hot.* TEXT_MAIN .text.fixup .text.unlikely .text.unlikely.* .fixup __ftr_alt_* .ref.text);
#ifdef CONFIG_PPC64
*(.tramp.ftrace.text);
#endif
@@ -146,6 +146,13 @@ SECTIONS
}
. = ALIGN(8);
+ __scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) {
+ __start___scv_entry_flush_fixup = .;
+ *(__scv_entry_flush_fixup)
+ __stop___scv_entry_flush_fixup = .;
+ }
+
+ . = ALIGN(8);
__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
__start___stf_exit_barrier_fixup = .;
*(__stf_exit_barrier_fixup)
@@ -187,6 +194,12 @@ SECTIONS
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
_sinittext = .;
INIT_TEXT
+
+ /*
+ *.init.text might be RO so we must ensure this section ends on
+ * a page boundary.
+ */
+ . = ALIGN(PAGE_SIZE);
_einittext = .;
#ifdef CONFIG_PPC64
*(.tramp.ftrace.init);
@@ -200,6 +213,8 @@ SECTIONS
EXIT_TEXT
}
+ . = ALIGN(PAGE_SIZE);
+
INIT_DATA_SECTION(16)
. = ALIGN(8);
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 47821055b94c..1fd31b4b0e13 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -290,9 +290,6 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
long *start, *end;
int i;
- start = PTRRELOC(&__start___entry_flush_fixup);
- end = PTRRELOC(&__stop___entry_flush_fixup);
-
instrs[0] = 0x60000000; /* nop */
instrs[1] = 0x60000000; /* nop */
instrs[2] = 0x60000000; /* nop */
@@ -312,6 +309,8 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
if (types & L1D_FLUSH_MTTRIG)
instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+ start = PTRRELOC(&__start___entry_flush_fixup);
+ end = PTRRELOC(&__stop___entry_flush_fixup);
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
@@ -328,6 +327,25 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
}
+ start = PTRRELOC(&__start___scv_entry_flush_fixup);
+ end = PTRRELOC(&__stop___scv_entry_flush_fixup);
+ for (; start < end; start++, i++) {
+ dest = (void *)start + *start;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+ patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+
+ if (types == L1D_FLUSH_FALLBACK)
+ patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
+ BRANCH_SET_LINK);
+ else
+ patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
+
+ patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ }
+
+
printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
(types == L1D_FLUSH_NONE) ? "no" :
(types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 81b76d44725d..e9e2c1f0a690 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -137,7 +137,7 @@ config PA_BITS
config PAGE_OFFSET
hex
- default 0xC0000000 if 32BIT && MAXPHYSMEM_2GB
+ default 0xC0000000 if 32BIT && MAXPHYSMEM_1GB
default 0x80000000 if 64BIT && !MMU
default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
@@ -247,10 +247,12 @@ config MODULE_SECTIONS
choice
prompt "Maximum Physical Memory"
- default MAXPHYSMEM_2GB if 32BIT
+ default MAXPHYSMEM_1GB if 32BIT
default MAXPHYSMEM_2GB if 64BIT && CMODEL_MEDLOW
default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
+ config MAXPHYSMEM_1GB
+ bool "1GiB"
config MAXPHYSMEM_2GB
bool "2GiB"
config MAXPHYSMEM_128GB
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
index 4a2729f5ca3f..24d75a146e02 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -88,7 +88,9 @@
phy-mode = "gmii";
phy-handle = <&phy0>;
phy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-id0007.0771";
reg = <0>;
+ reset-gpios = <&gpio 12 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index d222d353d86d..8c3d1e451703 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -64,6 +64,8 @@ CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_SPI=y
CONFIG_SPI_SIFIVE=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SIFIVE=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_POWER_RESET=y
CONFIG_DRM=y
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 59dd7be55005..445ccc97305a 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -3,6 +3,5 @@ generic-y += early_ioremap.h
generic-y += extable.h
generic-y += flat.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += user.h
generic-y += vmlinux.lds.h
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 41a72861987c..251e1db088fa 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -99,7 +99,6 @@
| _PAGE_DIRTY)
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
-#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index 8454f746bbfd..1453a2f563bc 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -10,7 +10,7 @@
#include <linux/types.h>
-#ifndef GENERIC_TIME_VSYSCALL
+#ifndef CONFIG_GENERIC_TIME_VSYSCALL
struct vdso_data {
};
#endif
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index de59dd457b41..d86781357044 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -26,7 +26,16 @@ cache_get_priv_group(struct cacheinfo *this_leaf)
static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type)
{
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(smp_processor_id());
+ /*
+ * Using raw_smp_processor_id() elides a preemptability check, but this
+ * is really indicative of a larger problem: the cacheinfo UABI assumes
+ * that cores have a homonogenous view of the cache hierarchy. That
+ * happens to be the case for the current set of RISC-V systems, but
+ * likely won't be true in general. Since there's no way to provide
+ * correct information for these systems via the current UABI we're
+ * just eliding the check for now.
+ */
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id());
struct cacheinfo *this_leaf;
int index;
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 524d918f3601..744f3209c48d 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -124,15 +124,15 @@ skip_context_tracking:
REG_L a1, (a1)
jr a1
1:
-#ifdef CONFIG_TRACE_IRQFLAGS
- call trace_hardirqs_on
-#endif
/*
* Exceptions run with interrupts enabled or disabled depending on the
* state of SR_PIE in m/sstatus.
*/
andi t0, s1, SR_PIE
beqz t0, 1f
+#ifdef CONFIG_TRACE_IRQFLAGS
+ call trace_hardirqs_on
+#endif
csrs CSR_STATUS, SR_IE
1:
@@ -155,6 +155,15 @@ skip_context_tracking:
tail do_trap_unknown
handle_syscall:
+#ifdef CONFIG_RISCV_M_MODE
+ /*
+ * When running in M-Mode (no MMU config), MPIE does not get set.
+ * As a result, we need to force enable interrupts here because
+ * handle_exception did not set SR_IE as it always sees SR_PIE
+ * being cleared.
+ */
+ csrs CSR_STATUS, SR_IE
+#endif
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
/* Recover a0 - a7 for system calls */
REG_L a0, PT_A0(sp)
@@ -186,14 +195,7 @@ check_syscall_nr:
* Syscall number held in a7.
* If syscall number is above allowed value, redirect to ni_syscall.
*/
- bge a7, t0, 1f
- /*
- * Check if syscall is rejected by tracer, i.e., a7 == -1.
- * If yes, we pretend it was executed.
- */
- li t1, -1
- beq a7, t1, ret_from_syscall_rejected
- blt a7, t1, 1f
+ bgeu a7, t0, 1f
/* Call syscall */
la s0, sys_call_table
slli t0, a7, RISCV_LGPTR
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 1d85e9bf783c..3fa3f26dde85 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -127,7 +127,9 @@ static void __init init_resources(void)
{
struct memblock_region *region = NULL;
struct resource *res = NULL;
- int ret = 0;
+ struct resource *mem_res = NULL;
+ size_t mem_res_sz = 0;
+ int ret = 0, i = 0;
code_res.start = __pa_symbol(_text);
code_res.end = __pa_symbol(_etext) - 1;
@@ -145,16 +147,17 @@ static void __init init_resources(void)
bss_res.end = __pa_symbol(__bss_stop) - 1;
bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt) * sizeof(*mem_res);
+ mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
+ if (!mem_res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);
/*
* Start by adding the reserved regions, if they overlap
* with /memory regions, insert_resource later on will take
* care of it.
*/
for_each_reserved_mem_region(region) {
- res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
- if (!res)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(struct resource));
+ res = &mem_res[i++];
res->name = "Reserved";
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
@@ -171,8 +174,10 @@ static void __init init_resources(void)
* Ignore any other reserved regions within
* system memory.
*/
- if (memblock_is_memory(res->start))
+ if (memblock_is_memory(res->start)) {
+ memblock_free((phys_addr_t) res, sizeof(struct resource));
continue;
+ }
ret = add_resource(&iomem_resource, res);
if (ret < 0)
@@ -181,10 +186,7 @@ static void __init init_resources(void)
/* Add /memory regions to the resource tree */
for_each_mem_region(region) {
- res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
- if (!res)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(struct resource));
+ res = &mem_res[i++];
if (unlikely(memblock_is_nomap(region))) {
res->name = "Reserved";
@@ -205,9 +207,9 @@ static void __init init_resources(void)
return;
error:
- memblock_free((phys_addr_t) res, sizeof(struct resource));
/* Better an empty resource tree than an inconsistent one */
release_child_resources(&iomem_resource);
+ memblock_free((phys_addr_t) mem_res, mem_res_sz);
}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 48b870a685b3..df5d2da7c40b 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -14,7 +14,7 @@
#include <asm/stacktrace.h>
-register unsigned long sp_in_global __asm__("sp");
+register const unsigned long sp_in_global __asm__("sp");
#ifdef CONFIG_FRAME_POINTER
@@ -28,9 +28,8 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
- const register unsigned long current_sp = sp_in_global;
fp = (unsigned long)__builtin_frame_address(0);
- sp = current_sp;
+ sp = sp_in_global;
pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */
diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
index 4d3a1048ad8b..8a5cf99c0776 100644
--- a/arch/riscv/kernel/time.c
+++ b/arch/riscv/kernel/time.c
@@ -4,6 +4,7 @@
* Copyright (C) 2017 SiFive
*/
+#include <linux/of_clk.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <asm/sbi.h>
@@ -24,6 +25,8 @@ void __init time_init(void)
riscv_timebase = prop;
lpj_fine = riscv_timebase / HZ;
+
+ of_clk_init(NULL);
timer_probe();
}
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index 678204231700..3f1d35e7c98a 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -12,7 +12,7 @@
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
-#ifdef GENERIC_TIME_VSYSCALL
+#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
#else
#include <asm/vdso.h>
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index bf5379135e39..7cd4993f4ff2 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -157,9 +157,10 @@ disable:
void __init setup_bootmem(void)
{
phys_addr_t mem_start = 0;
- phys_addr_t start, end = 0;
+ phys_addr_t start, dram_end, end = 0;
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
+ phys_addr_t max_mapped_addr = __pa(~(ulong)0);
u64 i;
/* Find the memory region containing the kernel */
@@ -181,7 +182,18 @@ void __init setup_bootmem(void)
/* Reserve from the start of the kernel to the end of the kernel */
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
- max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ dram_end = memblock_end_of_DRAM();
+
+ /*
+ * The memblock allocator is not aware that the last 4K bytes of
+ * addressable memory cannot be mapped because of the IS_ERR_VALUE
+ * macro. Make sure the last 4K bytes are not usable by memblock
+ * if the end of DRAM equals the maximum addressable memory.
+ */
+ if (max_mapped_addr == (dram_end - 1))
+ memblock_set_current_limit(max_mapped_addr - 4096);
+
+ max_pfn = PFN_DOWN(dram_end);
max_low_pfn = max_pfn;
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
set_max_mapnr(max_low_pfn);
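
The setup_bootmem() comment above refers to the kernel's IS_ERR_VALUE() convention, under which the top MAX_ERRNO (4095) bytes of the address space double as encoded error values, so a mapping reaching the very top of the addressable range would be indistinguishable from an error pointer. A quick userspace sketch, with definitions mirroring include/linux/err.h as I understand them:

/* Sketch of the IS_ERR_VALUE() range check; not taken verbatim from err.h. */
#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	unsigned long top = ~0UL;		/* last byte of the address space */
	unsigned long below = top - 4096;	/* one page lower */

	printf("top:   %d\n", IS_ERR_VALUE(top));	/* 1: looks like an error code */
	printf("below: %d\n", IS_ERR_VALUE(below));	/* 0: a normal address */
	return 0;
}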
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 12ddd1f6bf70..a8a2ffd9114a 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -93,8 +93,8 @@ void __init kasan_init(void)
VMALLOC_END));
for_each_mem_range(i, &_start, &_end) {
- void *start = (void *)_start;
- void *end = (void *)_end;
+ void *start = (void *)__va(_start);
+ void *end = (void *)__va(_end);
if (start >= end)
break;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index e84bdd15150b..c72874f09741 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -54,17 +54,23 @@ config KASAN_SHADOW_OFFSET
config S390
def_bool y
+ #
+ # Note: keep this list sorted alphabetically
+ #
+ imply IMA_SECURE_AND_OR_TRUSTED_BOOT
select ARCH_BINFMT_ELF_STATE
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_WX
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_FORCE_DMA_UNENCRYPTED
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
select ARCH_HAS_MEM_ENCRYPT
select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_SCALED_CPUTIME
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
@@ -111,8 +117,10 @@ config S390
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS2
+ select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
select DMA_OPS if PCI
select DYNAMIC_FTRACE if FUNCTION_TRACER
+ select GENERIC_ALLOCATOR
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES
select GENERIC_FIND_FIRST_BIT
@@ -126,22 +134,21 @@ config S390
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_VMALLOC
- select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_SOFT_DIRTY
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_VMAP_STACK
select HAVE_ASM_MODVERSIONS
- select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
- select HAVE_FAST_GUP
+ select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
select HAVE_EFFICIENT_UNALIGNED_ACCESS
+ select HAVE_FAST_GUP
select HAVE_FENTRY
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_ERROR_INJECTION
@@ -163,16 +170,15 @@ config S390
select HAVE_KRETPROBES
select HAVE_KVM
select HAVE_LIVEPATCH
- select HAVE_PERF_REGS
- select HAVE_PERF_USER_STACK_DUMP
select HAVE_MEMBLOCK_PHYS_MAP
- select MMU_GATHER_NO_GATHER
select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_NMI
select HAVE_NOP_MCOUNT
select HAVE_OPROFILE
select HAVE_PCI
select HAVE_PERF_EVENTS
- select MMU_GATHER_RCU_TABLE_FREE
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
select HAVE_RSEQ
@@ -181,6 +187,8 @@ config S390
select HAVE_VIRT_CPU_ACCOUNTING_IDLE
select IOMMU_HELPER if PCI
select IOMMU_SUPPORT if PCI
+ select MMU_GATHER_NO_GATHER
+ select MMU_GATHER_RCU_TABLE_FREE
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE if PCI
select NEED_SG_DMA_LENGTH if PCI
@@ -190,17 +198,12 @@ config S390
select PCI_MSI if PCI
select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
select SPARSE_IRQ
+ select SWIOTLB
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select TTY
select VIRT_CPU_ACCOUNTING
- select ARCH_HAS_SCALED_CPUTIME
- select HAVE_NMI
- select ARCH_HAS_FORCE_DMA_UNENCRYPTED
- select SWIOTLB
- select GENERIC_ALLOCATOR
- imply IMA_SECURE_AND_OR_TRUSTED_BOOT
-
+ # Note: keep the above list sorted alphabetically
config SCHED_OMIT_FRAME_POINTER
def_bool y
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 1be32fcf6f2e..c4f6ff98a612 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -61,7 +61,9 @@ CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_STATIC_KEYS_SELFTEST=y
+CONFIG_SECCOMP_CACHE_DEBUG=y
CONFIG_LOCK_EVENT_COUNTS=y
+# CONFIG_GCC_PLUGINS is not set
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
@@ -410,12 +412,12 @@ CONFIG_SCSI_ENCLOSURE=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SPI_ATTRS=m
-CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=m
CONFIG_SCSI_SAS_LIBSAS=m
CONFIG_SCSI_SRP_ATTRS=m
CONFIG_ISCSI_TCP=m
CONFIG_SCSI_DEBUG=m
-CONFIG_ZFCP=y
+CONFIG_ZFCP=m
CONFIG_SCSI_VIRTIO=m
CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=m
@@ -444,6 +446,7 @@ CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
CONFIG_DM_MULTIPATH_ST=m
CONFIG_DM_MULTIPATH_HST=m
+CONFIG_DM_MULTIPATH_IOA=m
CONFIG_DM_DELAY=m
CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
@@ -542,7 +545,6 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=0
-CONFIG_NULL_TTY=m
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
@@ -574,6 +576,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
+# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
@@ -655,6 +658,7 @@ CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
# CONFIG_CIFS_DEBUG is not set
CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_CIFS_SWN_UPCALL=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
@@ -826,6 +830,8 @@ CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
+CONFIG_FTRACE_STARTUP_TEST=y
+# CONFIG_EVENT_TRACE_STARTUP_TEST is not set
CONFIG_DEBUG_USER_ASCE=y
CONFIG_NOTIFIER_ERROR_INJECTION=m
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index e2171a008809..51135893cffe 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -58,6 +58,7 @@ CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
+# CONFIG_GCC_PLUGINS is not set
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
@@ -95,7 +96,6 @@ CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PERCPU_STATS=y
-CONFIG_GUP_TEST=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -403,12 +403,12 @@ CONFIG_SCSI_ENCLOSURE=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SPI_ATTRS=m
-CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=m
CONFIG_SCSI_SAS_LIBSAS=m
CONFIG_SCSI_SRP_ATTRS=m
CONFIG_ISCSI_TCP=m
CONFIG_SCSI_DEBUG=m
-CONFIG_ZFCP=y
+CONFIG_ZFCP=m
CONFIG_SCSI_VIRTIO=m
CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=m
@@ -437,6 +437,7 @@ CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
CONFIG_DM_MULTIPATH_ST=m
CONFIG_DM_MULTIPATH_HST=m
+CONFIG_DM_MULTIPATH_IOA=m
CONFIG_DM_DELAY=m
CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
@@ -536,7 +537,6 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=0
-CONFIG_NULL_TTY=m
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
@@ -568,6 +568,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
+# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
@@ -645,6 +646,7 @@ CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
# CONFIG_CIFS_DEBUG is not set
CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_CIFS_SWN_UPCALL=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
@@ -778,6 +780,7 @@ CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
+CONFIG_DEBUG_USER_ASCE=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index a302630341ef..1ef211dae77a 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -22,6 +22,7 @@ CONFIG_CRASH_DUMP=y
# CONFIG_VIRTUALIZATION is not set
# CONFIG_S390_GUEST is not set
# CONFIG_SECCOMP is not set
+# CONFIG_GCC_PLUGINS is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -58,6 +59,7 @@ CONFIG_RAW_DRIVER=y
# CONFIG_HID is not set
# CONFIG_VIRTIO_MENU is not set
# CONFIG_VHOST_MENU is not set
+# CONFIG_SURFACE_PLATFORMS is not set
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 319efa0e6d02..1a18d7b82f86 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -7,5 +7,4 @@ generated-y += unistd_nr.h
generic-y += asm-offsets.h
generic-y += export.h
generic-y += kvm_types.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5fa580219a86..52646f52f130 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -29,7 +29,6 @@ config SUPERH
select HAVE_ARCH_KGDB
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
- select HAVE_COPY_THREAD_TLS
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DYNAMIC_FTRACE
diff --git a/arch/sh/boards/mach-sh03/rtc.c b/arch/sh/boards/mach-sh03/rtc.c
index 8b23ed7c201c..7fb474844a2d 100644
--- a/arch/sh/boards/mach-sh03/rtc.c
+++ b/arch/sh/boards/mach-sh03/rtc.c
@@ -11,7 +11,6 @@
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/bcd.h>
-#include <linux/rtc.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/rtc.h>
diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig
index ba6ec042606f..e6c5ddf070c0 100644
--- a/arch/sh/configs/landisk_defconfig
+++ b/arch/sh/configs/landisk_defconfig
@@ -27,13 +27,12 @@ CONFIG_NETFILTER=y
CONFIG_ATALK=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_OFFBOARD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AEC62XX=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_ATP867X=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
diff --git a/arch/sh/configs/microdev_defconfig b/arch/sh/configs/microdev_defconfig
index c65667d00313..e9825196dd66 100644
--- a/arch/sh/configs/microdev_defconfig
+++ b/arch/sh/configs/microdev_defconfig
@@ -20,8 +20,6 @@ CONFIG_IP_PNP=y
# CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_SMC91X=y
diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig
index d10a0414123a..d00376eb044f 100644
--- a/arch/sh/configs/sdk7780_defconfig
+++ b/arch/sh/configs/sdk7780_defconfig
@@ -44,16 +44,14 @@ CONFIG_NET_SCHED=y
CONFIG_PARPORT=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_SPI_ATTRS=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_PLATFORM=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_NETDEVICES=y
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index 61bec46ebd66..4a44cac640bc 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -116,9 +116,6 @@ CONFIG_MTD_UBI_GLUEBI=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_SCSI_MULTI_LUN=y
diff --git a/arch/sh/configs/se7750_defconfig b/arch/sh/configs/se7750_defconfig
index 3f1c13799d79..4defc7628a49 100644
--- a/arch/sh/configs/se7750_defconfig
+++ b/arch/sh/configs/se7750_defconfig
@@ -29,7 +29,6 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_ROM=y
-CONFIG_IDE=y
CONFIG_SCSI=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index f0073ed39947..48b457d59e79 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -39,9 +39,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_BLK_DEV_IDETAPE=m
CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
CONFIG_BLK_DEV_SR=m
diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig
index d0de378beefe..7d54f284ce10 100644
--- a/arch/sh/drivers/dma/Kconfig
+++ b/arch/sh/drivers/dma/Kconfig
@@ -63,8 +63,7 @@ config PVR2_DMA
config G2_DMA
tristate "G2 Bus DMA support"
- depends on SH_DREAMCAST
- select SH_DMA_API
+ depends on SH_DREAMCAST && SH_DMA_API
help
This enables support for the DMA controller for the Dreamcast's
G2 bus. Drivers that want this will generally enable this on
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 7435182ef846..fc44d9c88b41 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
diff --git a/arch/sh/include/asm/gpio.h b/arch/sh/include/asm/gpio.h
index 351918894e86..d643250f0a0f 100644
--- a/arch/sh/include/asm/gpio.h
+++ b/arch/sh/include/asm/gpio.h
@@ -16,7 +16,6 @@
#include <cpu/gpio.h>
#endif
-#define ARCH_NR_GPIOS 512
#include <asm-generic/gpio.h>
#ifdef CONFIG_GPIOLIB
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 25eb80905416..e48b3dd996f5 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -14,7 +14,6 @@
#include <cpu/mmu_context.h>
#include <asm/page.h>
#include <asm/cache.h>
-#include <asm/thread_info.h>
! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 703d3069997c..77aa2f802d8d 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -105,7 +105,7 @@ config VSYSCALL
(the default value) say Y.
config NUMA
- bool "Non Uniform Memory Access (NUMA) Support"
+ bool "Non-Uniform Memory Access (NUMA) Support"
depends on MMU && SYS_SUPPORTS_NUMA
select ARCH_WANT_NUMA_VARIABLE_LOCALITY
default n
diff --git a/arch/sh/mm/asids-debugfs.c b/arch/sh/mm/asids-debugfs.c
index 4c1ca197e9c5..d16d6f5ec774 100644
--- a/arch/sh/mm/asids-debugfs.c
+++ b/arch/sh/mm/asids-debugfs.c
@@ -26,7 +26,7 @@
#include <asm/processor.h>
#include <asm/mmu_context.h>
-static int asids_seq_show(struct seq_file *file, void *iter)
+static int asids_debugfs_show(struct seq_file *file, void *iter)
{
struct task_struct *p;
@@ -48,18 +48,7 @@ static int asids_seq_show(struct seq_file *file, void *iter)
return 0;
}
-static int asids_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, asids_seq_show, inode->i_private);
-}
-
-static const struct file_operations asids_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = asids_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(asids_debugfs);
static int __init asids_debugfs_init(void)
{
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 17d780794497..b0f185169dfa 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -22,7 +22,7 @@ enum cache_type {
CACHE_TYPE_UNIFIED,
};
-static int cache_seq_show(struct seq_file *file, void *iter)
+static int cache_debugfs_show(struct seq_file *file, void *iter)
{
unsigned int cache_type = (unsigned int)file->private;
struct cache_info *cache;
@@ -94,18 +94,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
return 0;
}
-static int cache_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, cache_seq_show, inode->i_private);
-}
-
-static const struct file_operations cache_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = cache_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(cache_debugfs);
static int __init cache_debugfs_init(void)
{
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index b20aba6e1b37..68eb7cc6e564 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -812,7 +812,7 @@ bool __in_29bit_mode(void)
return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}
-static int pmb_seq_show(struct seq_file *file, void *iter)
+static int pmb_debugfs_show(struct seq_file *file, void *iter)
{
int i;
@@ -846,18 +846,7 @@ static int pmb_seq_show(struct seq_file *file, void *iter)
return 0;
}
-static int pmb_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pmb_seq_show, NULL);
-}
-
-static const struct file_operations pmb_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = pmb_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(pmb_debugfs);
static int __init pmb_debugfs_init(void)
{
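
The three SH debugfs conversions above (asids, cache, pmb) follow the same seq_file idiom: once the show routine is named <prefix>_show, DEFINE_SHOW_ATTRIBUTE(<prefix>) generates the single_open() wrapper and the file_operations that each file previously open-coded. Roughly (a from-memory sketch of the helper in include/linux/seq_file.h, not copied from this tree):

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}

Hence the renames to asids_debugfs_show(), cache_debugfs_show() and pmb_debugfs_show(): the macro derives the matching _open and _fops names from the same prefix.
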
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 5269a704801f..3688fdae50e4 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -6,5 +6,4 @@ generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
generic-y += export.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index 875116209ec1..c7b2e208328b 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -50,10 +50,11 @@ extern pte_t *pkmap_page_table;
#define flush_cache_kmaps() flush_cache_all()
-/* FIXME: Use __flush_tlb_one(vaddr) instead of flush_cache_all() -- Anton */
-#define arch_kmap_local_post_map(vaddr, pteval) flush_cache_all()
-#define arch_kmap_local_post_unmap(vaddr) flush_cache_all()
-
+/* FIXME: Use __flush_*_one(vaddr) instead of flush_*_all() -- Anton */
+#define arch_kmap_local_pre_map(vaddr, pteval) flush_cache_all()
+#define arch_kmap_local_pre_unmap(vaddr) flush_cache_all()
+#define arch_kmap_local_post_map(vaddr, pteval) flush_tlb_all()
+#define arch_kmap_local_post_unmap(vaddr) flush_tlb_all()
#endif /* __KERNEL__ */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7b6dd10b162a..21f851179ff0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -19,6 +19,7 @@ config X86_32
select KMAP_LOCAL
select MODULES_USE_ELF_REL
select OLD_SIGACTION
+ select ARCH_SPLIT_ARG64
config X86_64
def_bool y
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 18d8f17f755c..0904f5676e4d 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -73,10 +73,8 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
unsigned int nr)
{
if (likely(nr < IA32_NR_syscalls)) {
- instrumentation_begin();
nr = array_index_nospec(nr, IA32_NR_syscalls);
regs->ax = ia32_sys_call_table[nr](regs);
- instrumentation_end();
}
}
@@ -91,8 +89,11 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
* or may not be necessary, but it matches the old asm behavior.
*/
nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+ instrumentation_begin();
do_syscall_32_irqs_on(regs, nr);
+
+ instrumentation_end();
syscall_exit_to_user_mode(regs);
}
@@ -121,11 +122,12 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
res = get_user(*(u32 *)&regs->bp,
(u32 __user __force *)(unsigned long)(u32)regs->sp);
}
- instrumentation_end();
if (res) {
/* User code screwed up. */
regs->ax = -EFAULT;
+
+ instrumentation_end();
syscall_exit_to_user_mode(regs);
return false;
}
@@ -135,6 +137,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
/* Now this is just like a normal syscall. */
do_syscall_32_irqs_on(regs, nr);
+
+ instrumentation_end();
syscall_exit_to_user_mode(regs);
return true;
}
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index e04d90af4c27..6375967a8244 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -16,6 +16,7 @@
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/idtentry.h>
+#include <linux/kexec.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
@@ -26,6 +27,8 @@
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
+int hyperv_init_cpuhp;
+
void *hv_hypercall_pg;
EXPORT_SYMBOL_GPL(hv_hypercall_pg);
@@ -312,6 +315,25 @@ static struct syscore_ops hv_syscore_ops = {
.resume = hv_resume,
};
+static void (* __initdata old_setup_percpu_clockev)(void);
+
+static void __init hv_stimer_setup_percpu_clockev(void)
+{
+ /*
+ * Ignore any errors in setting up stimer clockevents
+ * as we can run with the LAPIC timer as a fallback.
+ */
+ (void)hv_stimer_alloc();
+
+ /*
+ * Still register the LAPIC timer, because the direct-mode STIMER is
+ * not supported by old versions of Hyper-V. This also allows users
+ * to switch to LAPIC timer via /sys, if they want to.
+ */
+ if (old_setup_percpu_clockev)
+ old_setup_percpu_clockev();
+}
+
/*
* This function is to be invoked early in the boot sequence after the
* hypervisor has been detected.
@@ -390,10 +412,14 @@ void __init hyperv_init(void)
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
/*
- * Ignore any errors in setting up stimer clockevents
- * as we can run with the LAPIC timer as a fallback.
+ * hyperv_init() is called before LAPIC is initialized: see
+ * apic_intr_mode_init() -> x86_platform.apic_post_init() and
+ * apic_bsp_setup() -> setup_local_APIC(). The direct-mode STIMER
+ * depends on LAPIC, so hv_stimer_alloc() should be called from
+ * x86_init.timers.setup_percpu_clockev.
*/
- (void)hv_stimer_alloc();
+ old_setup_percpu_clockev = x86_init.timers.setup_percpu_clockev;
+ x86_init.timers.setup_percpu_clockev = hv_stimer_setup_percpu_clockev;
hv_apic_init();
@@ -401,6 +427,7 @@ void __init hyperv_init(void)
register_syscore_ops(&hv_syscore_ops);
+ hyperv_init_cpuhp = cpuhp;
return;
remove_cpuhp_state:
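
The hv_init.c hunks above defer hv_stimer_alloc() by saving the current x86_init.timers.setup_percpu_clockev pointer and installing a wrapper that does the stimer work first and then falls through to whatever callback was there before. A minimal userspace sketch of that save-and-chain pattern (illustrative only; all names below are invented):

#include <stdio.h>

static void (*old_hook)(void);

static void original_hook(void)
{
	puts("original hook (e.g. LAPIC timer setup)");
}

static void wrapper_hook(void)
{
	puts("extra work first (e.g. stimer allocation)");
	if (old_hook)
		old_hook();
}

int main(void)
{
	void (*setup_percpu_clockev)(void) = original_hook;

	/* Save the current callback, then install the wrapper. */
	old_hook = setup_percpu_clockev;
	setup_percpu_clockev = wrapper_hook;

	setup_percpu_clockev();	/* runs the extra work, then the original */
	return 0;
}
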
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 5208ba49c89a..2c87350c1fb0 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -66,11 +66,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
if (!hv_hypercall_pg)
goto do_native;
- if (cpumask_empty(cpus))
- return;
-
local_irq_save(flags);
+ /*
+ * Only check the mask _after_ interrupts have been disabled to avoid the
+ * mask changing under our feet.
+ */
+ if (cpumask_empty(cpus)) {
+ local_irq_restore(flags);
+ return;
+ }
+
flush_pcpu = (struct hv_tlb_flush **)
this_cpu_ptr(hyperv_pcpu_input_arg);
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index a5aba4ab0224..67a4f1cb2aac 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -16,14 +16,25 @@
* Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
* disables preemption so be careful if you intend to use it for long periods
* of time.
- * If you intend to use the FPU in softirq you need to check first with
+ * If you intend to use the FPU in irq/softirq you need to check first with
* irq_fpu_usable() if it is possible.
*/
-extern void kernel_fpu_begin(void);
+
+/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
+#define KFPU_387 _BITUL(0) /* 387 state will be initialized */
+#define KFPU_MXCSR _BITUL(1) /* MXCSR will be initialized */
+
+extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
extern void fpregs_mark_activate(void);
+/* Code that is unaware of kernel_fpu_begin_mask() can use this */
+static inline void kernel_fpu_begin(void)
+{
+ kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
+}
+
/*
* Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
* A context switch will (and softirq might) save CPU's FPU registers to
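
With the split above, kernel_fpu_begin() keeps its old behaviour by passing both bits, while callers that know which legacy state they touch can ask for less. For instance, a hypothetical SSE-only user could skip the fninit by requesting only KFPU_MXCSR (illustrative fragment, not part of this series):

/* Hypothetical SSE-only caller of the new API (invented name). */
static void example_sse_only(void)
{
	kernel_fpu_begin_mask(KFPU_MXCSR);	/* sane MXCSR, no fninit */
	/* ... SSE-only work ... */
	kernel_fpu_end();
}
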
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 5e658ba2654a..9abe842dbd84 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -97,6 +97,7 @@
#define INTEL_FAM6_LAKEFIELD 0x8A
#define INTEL_FAM6_ALDERLAKE 0x97
+#define INTEL_FAM6_ALDERLAKE_L 0x9A
/* "Small Core" Processors (Atom) */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3ab7b46087b7..3d6616f6f6ef 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1010,9 +1010,21 @@ struct kvm_arch {
*/
bool tdp_mmu_enabled;
- /* List of struct tdp_mmu_pages being used as roots */
+ /*
+ * List of struct kvm_mmu_pages being used as roots.
+ * All struct kvm_mmu_pages in the list should have
+ * tdp_mmu_page set.
+ * All struct kvm_mmu_pages in the list should have a positive
+ * root_count except when a thread holds the MMU lock and is removing
+ * an entry from the list.
+ */
struct list_head tdp_mmu_roots;
- /* List of struct tdp_mmu_pages not being used as roots */
+
+ /*
+ * List of struct kvm_mmu_pages not being used as roots.
+ * All struct kvm_mmu_pages in the list should have
+ * tdp_mmu_page set and a root_count of 0.
+ */
struct list_head tdp_mmu_pages;
};
@@ -1287,6 +1299,8 @@ struct kvm_x86_ops {
void (*migrate_timers)(struct kvm_vcpu *vcpu);
void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
+
+ void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
};
struct kvm_x86_nested_ops {
@@ -1468,6 +1482,7 @@ int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
diff --git a/arch/x86/include/asm/local64.h b/arch/x86/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/x86/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index ffc289992d1b..30f76b966857 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -74,6 +74,8 @@ static inline void hv_disable_stimer0_percpu_irq(int irq) {}
#if IS_ENABLED(CONFIG_HYPERV)
+extern int hyperv_init_cpuhp;
+
extern void *hv_hypercall_pg;
extern void __percpu **hyperv_pcpu_input_arg;
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 0b4920a7238e..e16cccdd0420 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -86,7 +86,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
* think of extending them - you will be slapped with a stinking trout or a frozen
* shark will reach you, wherever you are! You've been warned.
*/
-static inline unsigned long long notrace __rdmsr(unsigned int msr)
+static __always_inline unsigned long long __rdmsr(unsigned int msr)
{
DECLARE_ARGS(val, low, high);
@@ -98,7 +98,7 @@ static inline unsigned long long notrace __rdmsr(unsigned int msr)
return EAX_EDX_VAL(val, low, high);
}
-static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
+static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
{
asm volatile("1: wrmsr\n"
"2:\n"
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 488a8e848754..9239399e5491 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -110,6 +110,8 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
#define topology_die_id(cpu) (cpu_data(cpu).cpu_die_id)
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
+extern unsigned int __max_die_per_package;
+
#ifdef CONFIG_SMP
#define topology_die_cpumask(cpu) (per_cpu(cpu_die_map, cpu))
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
@@ -118,8 +120,6 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
extern unsigned int __max_logical_packages;
#define topology_max_packages() (__max_logical_packages)
-extern unsigned int __max_die_per_package;
-
static inline int topology_max_die_per_package(void)
{
return __max_die_per_package;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f8ca66f3d861..347a956f71ca 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -542,12 +542,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
u32 ecx;
ecx = cpuid_ecx(0x8000001e);
- nodes_per_socket = ((ecx >> 8) & 7) + 1;
+ __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
u64 value;
rdmsrl(MSR_FAM10H_NODE_ID, value);
- nodes_per_socket = ((value >> 3) & 7) + 1;
+ __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
}
if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 13d3f1cbda17..e133ce1e562b 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1992,10 +1992,9 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
* that out because it's an indirect call. Annotate it.
*/
instrumentation_begin();
- trace_hardirqs_off_finish();
+
machine_check_vector(regs);
- if (regs->flags & X86_EFLAGS_IF)
- trace_hardirqs_on_prepare();
+
instrumentation_end();
irqentry_nmi_exit(regs, irq_state);
}
@@ -2004,7 +2003,9 @@ static __always_inline void exc_machine_check_user(struct pt_regs *regs)
{
irqentry_enter_from_user_mode(regs);
instrumentation_begin();
+
machine_check_vector(regs);
+
instrumentation_end();
irqentry_exit_to_user_mode(regs);
}
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index f628e3dc150f..43b54bef5448 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -135,14 +135,32 @@ static void hv_machine_shutdown(void)
{
if (kexec_in_progress && hv_kexec_handler)
hv_kexec_handler();
+
+ /*
+ * Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor
+ * corrupts the old VP Assist Pages and can crash the kexec kernel.
+ */
+ if (kexec_in_progress && hyperv_init_cpuhp > 0)
+ cpuhp_remove_state(hyperv_init_cpuhp);
+
+ /* The function calls stop_other_cpus(). */
native_machine_shutdown();
+
+ /* Disable the hypercall page when there is only 1 active CPU. */
+ if (kexec_in_progress)
+ hyperv_cleanup();
}
static void hv_machine_crash_shutdown(struct pt_regs *regs)
{
if (hv_crash_handler)
hv_crash_handler(regs);
+
+ /* The function calls crash_smp_send_stop(). */
native_machine_crash_shutdown(regs);
+
+ /* Disable the hypercall page when there is only 1 active CPU. */
+ hyperv_cleanup();
}
#endif /* CONFIG_KEXEC_CORE */
#endif /* CONFIG_HYPERV */
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 23ad8e953dfb..a29997e6cf9e 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -167,9 +167,6 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
*repeat = 0;
*uniform = 1;
- /* Make end inclusive instead of exclusive */
- end--;
-
prev_match = MTRR_TYPE_INVALID;
for (i = 0; i < num_var_ranges; ++i) {
unsigned short start_state, end_state, inclusive;
@@ -261,6 +258,9 @@ u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
int repeat;
u64 partial_end;
+ /* Make end inclusive instead of exclusive */
+ end--;
+
if (!mtrr_state_set)
return MTRR_TYPE_INVALID;
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 29ffb95b25ff..460f3e0df106 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -525,89 +525,70 @@ static void rdtgroup_remove(struct rdtgroup *rdtgrp)
kfree(rdtgrp);
}
-struct task_move_callback {
- struct callback_head work;
- struct rdtgroup *rdtgrp;
-};
-
-static void move_myself(struct callback_head *head)
+static void _update_task_closid_rmid(void *task)
{
- struct task_move_callback *callback;
- struct rdtgroup *rdtgrp;
-
- callback = container_of(head, struct task_move_callback, work);
- rdtgrp = callback->rdtgrp;
-
/*
- * If resource group was deleted before this task work callback
- * was invoked, then assign the task to root group and free the
- * resource group.
+ * If the task is still current on this CPU, update PQR_ASSOC MSR.
+ * Otherwise, the MSR is updated when the task is scheduled in.
*/
- if (atomic_dec_and_test(&rdtgrp->waitcount) &&
- (rdtgrp->flags & RDT_DELETED)) {
- current->closid = 0;
- current->rmid = 0;
- rdtgroup_remove(rdtgrp);
- }
-
- if (unlikely(current->flags & PF_EXITING))
- goto out;
-
- preempt_disable();
- /* update PQR_ASSOC MSR to make resource group go into effect */
- resctrl_sched_in();
- preempt_enable();
+ if (task == current)
+ resctrl_sched_in();
+}
-out:
- kfree(callback);
+static void update_task_closid_rmid(struct task_struct *t)
+{
+ if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
+ smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
+ else
+ _update_task_closid_rmid(t);
}
static int __rdtgroup_move_task(struct task_struct *tsk,
struct rdtgroup *rdtgrp)
{
- struct task_move_callback *callback;
- int ret;
-
- callback = kzalloc(sizeof(*callback), GFP_KERNEL);
- if (!callback)
- return -ENOMEM;
- callback->work.func = move_myself;
- callback->rdtgrp = rdtgrp;
+ /* If the task is already in rdtgrp, no need to move the task. */
+ if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
+ tsk->rmid == rdtgrp->mon.rmid) ||
+ (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
+ tsk->closid == rdtgrp->mon.parent->closid))
+ return 0;
/*
- * Take a refcount, so rdtgrp cannot be freed before the
- * callback has been invoked.
+ * Set the task's closid/rmid before the PQR_ASSOC MSR is updated
+ * from them.
+ *
+ * For ctrl_mon groups, move both closid and rmid.
+ * For monitor groups, tasks can be moved only from
+ * their parent CTRL group.
*/
- atomic_inc(&rdtgrp->waitcount);
- ret = task_work_add(tsk, &callback->work, TWA_RESUME);
- if (ret) {
- /*
- * Task is exiting. Drop the refcount and free the callback.
- * No need to check the refcount as the group cannot be
- * deleted before the write function unlocks rdtgroup_mutex.
- */
- atomic_dec(&rdtgrp->waitcount);
- kfree(callback);
- rdt_last_cmd_puts("Task exited\n");
- } else {
- /*
- * For ctrl_mon groups move both closid and rmid.
- * For monitor groups, can move the tasks only from
- * their parent CTRL group.
- */
- if (rdtgrp->type == RDTCTRL_GROUP) {
- tsk->closid = rdtgrp->closid;
+
+ if (rdtgrp->type == RDTCTRL_GROUP) {
+ tsk->closid = rdtgrp->closid;
+ tsk->rmid = rdtgrp->mon.rmid;
+ } else if (rdtgrp->type == RDTMON_GROUP) {
+ if (rdtgrp->mon.parent->closid == tsk->closid) {
tsk->rmid = rdtgrp->mon.rmid;
- } else if (rdtgrp->type == RDTMON_GROUP) {
- if (rdtgrp->mon.parent->closid == tsk->closid) {
- tsk->rmid = rdtgrp->mon.rmid;
- } else {
- rdt_last_cmd_puts("Can't move task to different control group\n");
- ret = -EINVAL;
- }
+ } else {
+ rdt_last_cmd_puts("Can't move task to different control group\n");
+ return -EINVAL;
}
}
- return ret;
+
+ /*
+ * Ensure the task's closid and rmid are written before determining whether
+ * the task is current; that check decides whether it will be interrupted.
+ */
+ barrier();
+
+ /*
+ * By now, the task's closid and rmid are set. If the task is current
+ * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
+ * group go into effect. If the task is not current, the MSR will be
+ * updated when the task is scheduled in.
+ */
+ update_task_closid_rmid(tsk);
+
+ return 0;
}
static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index 1068002c8532..8678864ce712 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -25,10 +25,10 @@
#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
-#ifdef CONFIG_SMP
unsigned int __max_die_per_package __read_mostly = 1;
EXPORT_SYMBOL(__max_die_per_package);
+#ifdef CONFIG_SMP
/*
* Check if given CPUID extended topology "leaf" is implemented
*/
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index eb86a2b831b1..571220ac8bea 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -121,7 +121,7 @@ int copy_fpregs_to_fpstate(struct fpu *fpu)
}
EXPORT_SYMBOL(copy_fpregs_to_fpstate);
-void kernel_fpu_begin(void)
+void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
preempt_disable();
@@ -141,13 +141,14 @@ void kernel_fpu_begin(void)
}
__cpu_invalidate_fpregs_state();
- if (boot_cpu_has(X86_FEATURE_XMM))
+ /* Put sane initial values into the control registers. */
+ if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
ldmxcsr(MXCSR_DEFAULT);
- if (boot_cpu_has(X86_FEATURE_FPU))
+ if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
asm volatile ("fninit");
}
-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
void kernel_fpu_end(void)
{
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 740f3bdb3f61..3412c4595efd 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -661,17 +661,6 @@ static void __init trim_platform_memory_ranges(void)
static void __init trim_bios_range(void)
{
/*
- * A special case is the first 4Kb of memory;
- * This is a BIOS owned area, not kernel ram, but generally
- * not listed as such in the E820 table.
- *
- * This typically reserves additional memory (64KiB by default)
- * since some BIOSes are known to corrupt low memory. See the
- * Kconfig help text for X86_RESERVE_LOW.
- */
- e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
-
- /*
* special case: Some BIOSes report the PC BIOS
* area (640Kb -> 1Mb) as RAM even though it is not.
* take them out.
@@ -728,6 +717,15 @@ early_param("reservelow", parse_reservelow);
static void __init trim_low_memory_range(void)
{
+ /*
+ * A special case is the first 4Kb of memory;
+ * This is a BIOS owned area, not kernel ram, but generally
+ * not listed as such in the E820 table.
+ *
+ * This typically reserves additional memory (64KiB by default)
+ * since some BIOSes are known to corrupt low memory. See the
+ * Kconfig help text for X86_RESERVE_LOW.
+ */
memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
}
diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
index 7d04b356d44d..cdc04d091242 100644
--- a/arch/x86/kernel/sev-es-shared.c
+++ b/arch/x86/kernel/sev-es-shared.c
@@ -305,14 +305,14 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
case 0xe4:
case 0xe5:
*exitinfo |= IOIO_TYPE_IN;
- *exitinfo |= (u64)insn->immediate.value << 16;
+ *exitinfo |= (u8)insn->immediate.value << 16;
break;
/* OUT immediate opcodes */
case 0xe6:
case 0xe7:
*exitinfo |= IOIO_TYPE_OUT;
- *exitinfo |= (u64)insn->immediate.value << 16;
+ *exitinfo |= (u8)insn->immediate.value << 16;
break;
/* IN register opcodes */
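
The (u8) casts above keep only the 8-bit port number that gets shifted into the port field of the exit information. Because the in-kernel instruction decoder stores immediates sign-extended in a signed field, the old (u64) cast could smear set bits across the upper exit-info bits for ports >= 0x80. A standalone sketch of the difference (assumes a sign-extended imm8, which is how the decoder represents it):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t imm = (int8_t)0x80;	/* port 0x80 decoded as a signed imm8 */

	uint64_t old_style = (uint64_t)imm << 16;	   /* sign bits pollute bits 63:24 */
	uint64_t new_style = (uint64_t)(uint8_t)imm << 16; /* only the port byte at bit 16 */

	printf("old: %#018llx\n", (unsigned long long)old_style);
	printf("new: %#018llx\n", (unsigned long long)new_style);
	return 0;
}
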
diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
index 0bd1a0fc587e..84c1821819af 100644
--- a/arch/x86/kernel/sev-es.c
+++ b/arch/x86/kernel/sev-es.c
@@ -225,7 +225,7 @@ static inline u64 sev_es_rd_ghcb_msr(void)
return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}
-static inline void sev_es_wr_ghcb_msr(u64 val)
+static __always_inline void sev_es_wr_ghcb_msr(u64 val)
{
u32 low, high;
@@ -286,6 +286,12 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
u16 d2;
u8 d1;
+ /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+ if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
+ memcpy(dst, buf, size);
+ return ES_OK;
+ }
+
switch (size) {
case 1:
memcpy(&d1, buf, 1);
@@ -335,6 +341,12 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
u16 d2;
u8 d1;
+ /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+ if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
+ memcpy(buf, src, size);
+ return ES_OK;
+ }
+
switch (size) {
case 1:
if (get_user(d1, s))
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8ca66af96a54..117e24fbfd8a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -56,6 +56,7 @@
#include <linux/numa.h>
#include <linux/pgtable.h>
#include <linux/overflow.h>
+#include <linux/syscore_ops.h>
#include <asm/acpi.h>
#include <asm/desc.h>
@@ -2083,6 +2084,23 @@ static void init_counter_refs(void)
this_cpu_write(arch_prev_mperf, mperf);
}
+#ifdef CONFIG_PM_SLEEP
+static struct syscore_ops freq_invariance_syscore_ops = {
+ .resume = init_counter_refs,
+};
+
+static void register_freq_invariance_syscore_ops(void)
+{
+ /* Bail out if registered already. */
+ if (freq_invariance_syscore_ops.node.prev)
+ return;
+
+ register_syscore_ops(&freq_invariance_syscore_ops);
+}
+#else
+static inline void register_freq_invariance_syscore_ops(void) {}
+#endif
+
static void init_freq_invariance(bool secondary, bool cppc_ready)
{
bool ret = false;
@@ -2109,6 +2127,7 @@ static void init_freq_invariance(bool secondary, bool cppc_ready)
if (ret) {
init_counter_refs();
static_branch_enable(&arch_scale_freq_key);
+ register_freq_invariance_syscore_ops();
pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
} else {
pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3136e05831cf..43cceadd073e 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -674,7 +674,7 @@ static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
(unsigned long long)vcpu->arch.pv_eoi.msr_val);
return false;
}
- return val & 0x1;
+ return val & KVM_PV_EOI_ENABLED;
}
static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
@@ -2898,7 +2898,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
/* evaluate pending_events before reading the vector */
smp_rmb();
sipi_vector = apic->sipi_vector;
- kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
+ kvm_x86_ops.vcpu_deliver_sipi_vector(vcpu, sipi_vector);
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 9c4a9c8e43d9..581925e476d6 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -49,7 +49,7 @@ static inline u64 rsvd_bits(int s, int e)
if (e < s)
return 0;
- return ((1ULL << (e - s + 1)) - 1) << s;
+ return ((2ULL << (e - s)) - 1) << s;
}
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);
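
The rsvd_bits() change above avoids an undefined shift: with the old expression, a full 64-bit mask (s == 0, e == 63) computes 1ULL << 64, which is undefined in C, whereas 2ULL << 63 is a defined shift that wraps to zero, so the following subtraction still produces all ones. A quick userspace check of the new form (sketch only):

#include <stdint.h>
#include <stdio.h>

static uint64_t rsvd_bits(int s, int e)
{
	if (e < s)
		return 0;
	return ((2ULL << (e - s)) - 1) << s;	/* shift count is at most 63 */
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)rsvd_bits(52, 62)); /* 0x7ff0000000000000 */
	printf("%#llx\n", (unsigned long long)rsvd_bits(0, 63));  /* all ones */
	return 0;
}
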
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index c478904af518..6d16481aa29d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3493,26 +3493,25 @@ static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
* Return the level of the lowest level SPTE added to sptes.
* That SPTE may be non-present.
*/
-static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
+static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
{
struct kvm_shadow_walk_iterator iterator;
- int leaf = vcpu->arch.mmu->root_level;
+ int leaf = -1;
u64 spte;
-
walk_shadow_page_lockless_begin(vcpu);
- for (shadow_walk_init(&iterator, vcpu, addr);
+ for (shadow_walk_init(&iterator, vcpu, addr),
+ *root_level = iterator.level;
shadow_walk_okay(&iterator);
__shadow_walk_next(&iterator, spte)) {
leaf = iterator.level;
spte = mmu_spte_get_lockless(iterator.sptep);
- sptes[leaf - 1] = spte;
+ sptes[leaf] = spte;
if (!is_shadow_present_pte(spte))
break;
-
}
walk_shadow_page_lockless_end(vcpu);
@@ -3520,14 +3519,12 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
return leaf;
}
-/* return true if reserved bit is detected on spte. */
+/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{
- u64 sptes[PT64_ROOT_MAX_LEVEL];
+ u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
struct rsvd_bits_validate *rsvd_check;
- int root = vcpu->arch.mmu->shadow_root_level;
- int leaf;
- int level;
+ int root, leaf, level;
bool reserved = false;
if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) {
@@ -3536,35 +3533,45 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
}
if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
- leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes);
+ leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
else
- leaf = get_walk(vcpu, addr, sptes);
+ leaf = get_walk(vcpu, addr, sptes, &root);
+
+ if (unlikely(leaf < 0)) {
+ *sptep = 0ull;
+ return reserved;
+ }
+
+ *sptep = sptes[leaf];
+
+ /*
+ * Skip reserved bits checks on the terminal leaf if it's not a valid
+ * SPTE. Note, this also (intentionally) skips MMIO SPTEs, which, by
+ * design, always have reserved bits set. The purpose of the checks is
+ * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
+ */
+ if (!is_shadow_present_pte(sptes[leaf]))
+ leaf++;
rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
- for (level = root; level >= leaf; level--) {
- if (!is_shadow_present_pte(sptes[level - 1]))
- break;
+ for (level = root; level >= leaf; level--)
/*
* Use a bitwise-OR instead of a logical-OR to aggregate the
* reserved bit and EPT's invalid memtype/XWR checks to avoid
* adding a Jcc in the loop.
*/
- reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level - 1]) |
- __is_rsvd_bits_set(rsvd_check, sptes[level - 1],
- level);
- }
+ reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
+ __is_rsvd_bits_set(rsvd_check, sptes[level], level);
if (reserved) {
pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
__func__, addr);
for (level = root; level >= leaf; level--)
pr_err("------ spte 0x%llx level %d.\n",
- sptes[level - 1], level);
+ sptes[level], level);
}
- *sptep = sptes[leaf - 1];
-
return reserved;
}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 4bd2f1dc0172..2ef8615f9dba 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -44,7 +44,48 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
}
-#define for_each_tdp_mmu_root(_kvm, _root) \
+static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
+{
+ if (kvm_mmu_put_root(kvm, root))
+ kvm_tdp_mmu_free_root(kvm, root);
+}
+
+static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
+ struct kvm_mmu_page *root)
+{
+ lockdep_assert_held(&kvm->mmu_lock);
+
+ if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
+ return false;
+
+ kvm_mmu_get_root(kvm, root);
+ return true;
+
+}
+
+static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
+ struct kvm_mmu_page *root)
+{
+ struct kvm_mmu_page *next_root;
+
+ next_root = list_next_entry(root, link);
+ tdp_mmu_put_root(kvm, root);
+ return next_root;
+}
+
+/*
+ * Note: this iterator gets and puts references to the roots it iterates over.
+ * This makes it safe to release the MMU lock and yield within the loop, but
+ * if exiting the loop early, the caller must drop the reference to the most
+ * recent root. (Unless keeping a live reference is desirable.)
+ */
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \
+ for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots, \
+ typeof(*_root), link); \
+ tdp_mmu_next_root_valid(_kvm, _root); \
+ _root = tdp_mmu_next_root(_kvm, _root))
+
+#define for_each_tdp_mmu_root(_kvm, _root) \
list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
@@ -447,18 +488,9 @@ bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
struct kvm_mmu_page *root;
bool flush = false;
- for_each_tdp_mmu_root(kvm, root) {
- /*
- * Take a reference on the root so that it cannot be freed if
- * this thread releases the MMU lock and yields in this loop.
- */
- kvm_mmu_get_root(kvm, root);
-
+ for_each_tdp_mmu_root_yield_safe(kvm, root)
flush |= zap_gfn_range(kvm, root, start, end, true);
- kvm_mmu_put_root(kvm, root);
- }
-
return flush;
}
@@ -619,13 +651,7 @@ static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
int ret = 0;
int as_id;
- for_each_tdp_mmu_root(kvm, root) {
- /*
- * Take a reference on the root so that it cannot be freed if
- * this thread releases the MMU lock and yields in this loop.
- */
- kvm_mmu_get_root(kvm, root);
-
+ for_each_tdp_mmu_root_yield_safe(kvm, root) {
as_id = kvm_mmu_page_as_id(root);
slots = __kvm_memslots(kvm, as_id);
kvm_for_each_memslot(memslot, slots) {
@@ -647,8 +673,6 @@ static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
ret |= handler(kvm, memslot, root, gfn_start,
gfn_end, data);
}
-
- kvm_mmu_put_root(kvm, root);
}
return ret;
@@ -838,21 +862,13 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
int root_as_id;
bool spte_set = false;
- for_each_tdp_mmu_root(kvm, root) {
+ for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
- /*
- * Take a reference on the root so that it cannot be freed if
- * this thread releases the MMU lock and yields in this loop.
- */
- kvm_mmu_get_root(kvm, root);
-
spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages, min_level);
-
- kvm_mmu_put_root(kvm, root);
}
return spte_set;
@@ -906,21 +922,13 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
int root_as_id;
bool spte_set = false;
- for_each_tdp_mmu_root(kvm, root) {
+ for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
- /*
- * Take a reference on the root so that it cannot be freed if
- * this thread releases the MMU lock and yields in this loop.
- */
- kvm_mmu_get_root(kvm, root);
-
spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
-
- kvm_mmu_put_root(kvm, root);
}
return spte_set;
@@ -1029,21 +1037,13 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
int root_as_id;
bool spte_set = false;
- for_each_tdp_mmu_root(kvm, root) {
+ for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
- /*
- * Take a reference on the root so that it cannot be freed if
- * this thread releases the MMU lock and yields in this loop.
- */
- kvm_mmu_get_root(kvm, root);
-
spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
-
- kvm_mmu_put_root(kvm, root);
}
return spte_set;
}
@@ -1089,21 +1089,13 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
struct kvm_mmu_page *root;
int root_as_id;
- for_each_tdp_mmu_root(kvm, root) {
+ for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
- /*
- * Take a reference on the root so that it cannot be freed if
- * this thread releases the MMU lock and yields in this loop.
- */
- kvm_mmu_get_root(kvm, root);
-
zap_collapsible_spte_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
-
- kvm_mmu_put_root(kvm, root);
}
}
@@ -1160,16 +1152,19 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
* Return the level of the lowest level SPTE added to sptes.
* That SPTE may be non-present.
*/
-int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
+int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+ int *root_level)
{
struct tdp_iter iter;
struct kvm_mmu *mmu = vcpu->arch.mmu;
- int leaf = vcpu->arch.mmu->shadow_root_level;
gfn_t gfn = addr >> PAGE_SHIFT;
+ int leaf = -1;
+
+ *root_level = vcpu->arch.mmu->shadow_root_level;
tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
leaf = iter.level;
- sptes[leaf - 1] = iter.old_spte;
+ sptes[leaf] = iter.old_spte;
}
return leaf;
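
for_each_tdp_mmu_root_yield_safe() above encodes a get-next-before-put-current discipline, so the walk stays valid even if the root just visited is freed once its reference is dropped. Stripped of the KVM specifics, the shape of the idea looks like this (userspace sketch; all names invented):

#include <stdio.h>

struct node {
	int id;
	int refcount;
	struct node *next;
};

static struct node *node_get(struct node *n)
{
	if (n)
		n->refcount++;
	return n;
}

static void node_put(struct node *n)
{
	if (n && --n->refcount == 0)
		printf("node %d would be freed here\n", n->id);
}

/* Advance while always holding a reference on the element in hand:
 * grab the next element first, only then drop the current one. */
static struct node *next_and_put(struct node *cur)
{
	struct node *next = node_get(cur->next);

	node_put(cur);
	return next;
}

int main(void)
{
	struct node c = { .id = 3, .refcount = 1, .next = NULL };
	struct node b = { .id = 2, .refcount = 1, .next = &c };
	struct node a = { .id = 1, .refcount = 1, .next = &b };

	for (struct node *pos = node_get(&a); pos; pos = next_and_put(pos))
		printf("visiting node %d\n", pos->id);

	return 0;
}
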
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 556e065503f6..cbbdbadd1526 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -44,5 +44,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn);
-int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes);
+int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+ int *root_level);
+
#endif /* __KVM_X86_MMU_TDP_MMU_H */
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index b0b667456b2e..cb4c6ee10029 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -199,6 +199,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+
if (!nested_svm_vmrun_msrpm(svm)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =
@@ -595,6 +596,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
svm->nested.vmcb12_gpa = 0;
WARN_ON_ONCE(svm->nested.nested_run_pending);
+ kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
+
/* in case we halted in L2 */
svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -754,6 +757,7 @@ void svm_leave_nested(struct vcpu_svm *svm)
leave_guest_mode(&svm->vcpu);
copy_vmcb_control_area(&vmcb->control, &hsave->control);
nested_svm_uninit_mmu_context(&svm->vcpu);
+ vmcb_mark_all_dirty(svm->vmcb);
}
kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
@@ -1194,6 +1198,10 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
* in the registers, the save area of the nested state instead
* contains saved L1 state.
*/
+
+ svm->nested.nested_run_pending =
+ !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
+
copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
hsave->save = *save;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 9858d5ae9ddd..c8ffdbc81709 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1563,6 +1563,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
goto vmgexit_err;
break;
case SVM_VMGEXIT_NMI_COMPLETE:
+ case SVM_VMGEXIT_AP_HLT_LOOP:
case SVM_VMGEXIT_AP_JUMP_TABLE:
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
break;
@@ -1888,6 +1889,9 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
case SVM_VMGEXIT_NMI_COMPLETE:
ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET);
break;
+ case SVM_VMGEXIT_AP_HLT_LOOP:
+ ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
+ break;
case SVM_VMGEXIT_AP_JUMP_TABLE: {
struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
@@ -2001,7 +2005,7 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
* of which one step is to perform a VMLOAD. Since hardware does not
* perform a VMSAVE on VMRUN, the host savearea must be updated.
*/
- asm volatile(__ex("vmsave") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
+ asm volatile(__ex("vmsave %0") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
/*
* Certain MSRs are restored on VMEXIT, only save ones that aren't
@@ -2040,3 +2044,21 @@ void sev_es_vcpu_put(struct vcpu_svm *svm)
wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
}
}
+
+void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ /* First SIPI: Use the values as initially set by the VMM */
+ if (!svm->received_first_sipi) {
+ svm->received_first_sipi = true;
+ return;
+ }
+
+ /*
+ * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
+ * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
+ * non-zero value.
+ */
+ ghcb_set_sw_exit_info_2(svm->ghcb, 1);
+}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index cce0143a6f80..7ef171790d02 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3677,8 +3677,6 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
return EXIT_FASTPATH_NONE;
}
-void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
-
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
struct vcpu_svm *svm)
{
@@ -4384,6 +4382,14 @@ static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
(vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT));
}
+static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
+{
+ if (!sev_es_guest(vcpu->kvm))
+ return kvm_vcpu_deliver_sipi_vector(vcpu, vector);
+
+ sev_vcpu_deliver_sipi_vector(vcpu, vector);
+}
+
static void svm_vm_destroy(struct kvm *kvm)
{
avic_vm_destroy(kvm);
@@ -4526,6 +4532,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.msr_filter_changed = svm_msr_filter_changed,
.complete_emulated_msr = svm_complete_emulated_msr,
+
+ .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
};
static struct kvm_x86_init_ops svm_init_ops __initdata = {
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 5431e6335e2e..0fe874ae5498 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -185,6 +185,7 @@ struct vcpu_svm {
struct vmcb_save_area *vmsa;
struct ghcb *ghcb;
struct kvm_host_map ghcb_map;
+ bool received_first_sipi;
/* SEV-ES scratch area support */
void *ghcb_sa;
@@ -591,6 +592,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
void sev_es_vcpu_put(struct vcpu_svm *svm);
+void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
/* vmenter.S */
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index e2f26564a12d..0fbb46990dfc 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4442,6 +4442,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
/* trying to cancel vmlaunch/vmresume is a bug */
WARN_ON_ONCE(vmx->nested.nested_run_pending);
+ kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+
/* Service the TLB flush request for L2 before switching to L1. */
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 75c9c6a0a3a4..2af05d3b0590 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7707,6 +7707,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.msr_filter_changed = vmx_msr_filter_changed,
.complete_emulated_msr = kvm_complete_insn_gp,
.cpu_dirty_log_size = vmx_cpu_dirty_log_size,
+
+ .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
};
static __init int hardware_setup(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3f7c1fc7a3ce..9a8969a6dd06 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7976,17 +7976,22 @@ void kvm_arch_exit(void)
kmem_cache_destroy(x86_fpu_cache);
}
-int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
+static int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason)
{
++vcpu->stat.halt_exits;
if (lapic_in_kernel(vcpu)) {
- vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
+ vcpu->arch.mp_state = state;
return 1;
} else {
- vcpu->run->exit_reason = KVM_EXIT_HLT;
+ vcpu->run->exit_reason = reason;
return 0;
}
}
+
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
+{
+ return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
+}
EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
@@ -8000,6 +8005,14 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
+{
+ int ret = kvm_skip_emulated_instruction(vcpu);
+
+ return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, KVM_EXIT_AP_RESET_HOLD) && ret;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
+
#ifdef CONFIG_X86_64
static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
unsigned long clock_type)
@@ -8789,7 +8802,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_request_pending(vcpu)) {
if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
- if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
+ if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
+ ;
+ else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
r = 0;
goto out;
}
@@ -9094,6 +9109,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
kvm_apic_accept_events(vcpu);
switch(vcpu->arch.mp_state) {
case KVM_MP_STATE_HALTED:
+ case KVM_MP_STATE_AP_RESET_HOLD:
vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
@@ -9520,8 +9536,9 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
kvm_load_guest_fpu(vcpu);
kvm_apic_accept_events(vcpu);
- if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
- vcpu->arch.pv.pv_unhalted)
+ if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
+ vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
+ vcpu->arch.pv.pv_unhalted)
mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
else
mp_state->mp_state = vcpu->arch.mp_state;
@@ -10152,6 +10169,7 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
kvm_rip_write(vcpu, 0);
}
+EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
int kvm_arch_hardware_enable(void)
{
diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
index 4321fa02e18d..419365c48b2a 100644
--- a/arch/x86/lib/mmx_32.c
+++ b/arch/x86/lib/mmx_32.c
@@ -26,6 +26,16 @@
#include <asm/fpu/api.h>
#include <asm/asm.h>
+/*
+ * Use KFPU_387. MMX instructions are not affected by MXCSR,
+ * but both AMD and Intel documentation states that even integer MMX
+ * operations will result in #MF if an exception is pending in FCW.
+ *
+ * EMMS is not needed afterwards because, after calling kernel_fpu_end(),
+ * any subsequent user of the 387 stack will reinitialize it using
+ * KFPU_387.
+ */
+
void *_mmx_memcpy(void *to, const void *from, size_t len)
{
void *p;
@@ -37,7 +47,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
p = to;
i = len >> 6; /* len/64 */
- kernel_fpu_begin();
+ kernel_fpu_begin_mask(KFPU_387);
__asm__ __volatile__ (
"1: prefetch (%0)\n" /* This set is 28 bytes */
@@ -127,7 +137,7 @@ static void fast_clear_page(void *page)
{
int i;
- kernel_fpu_begin();
+ kernel_fpu_begin_mask(KFPU_387);
__asm__ __volatile__ (
" pxor %%mm0, %%mm0\n" : :
@@ -160,7 +170,7 @@ static void fast_copy_page(void *to, void *from)
{
int i;
- kernel_fpu_begin();
+ kernel_fpu_begin_mask(KFPU_387);
/*
* maybe the prefetch stuff can go before the expensive fnsave...
@@ -247,7 +257,7 @@ static void fast_clear_page(void *page)
{
int i;
- kernel_fpu_begin();
+ kernel_fpu_begin_mask(KFPU_387);
__asm__ __volatile__ (
" pxor %%mm0, %%mm0\n" : :
@@ -282,7 +292,7 @@ static void fast_copy_page(void *to, void *from)
{
int i;
- kernel_fpu_begin();
+ kernel_fpu_begin_mask(KFPU_387);
__asm__ __volatile__ (
"1: prefetch (%0)\n"
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index dfd82f51ba66..f6a9e2e36642 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -829,6 +829,8 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
}
free_page((unsigned long)pmd_sv);
+
+ pgtable_pmd_page_dtor(virt_to_page(pmd));
free_page((unsigned long)pmd);
return 1;
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index 9e87ab010c82..e68ea5f4ad1c 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -164,10 +164,10 @@ static int xen_cpu_up_prepare_hvm(unsigned int cpu)
else
per_cpu(xen_vcpu_id, cpu) = cpu;
rc = xen_vcpu_setup(cpu);
- if (rc)
+ if (rc || !xen_have_vector_callback)
return rc;
- if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
+ if (xen_feature(XENFEAT_hvm_safe_pvclock))
xen_setup_timer(cpu);
rc = xen_smp_intr_init(cpu);
@@ -188,6 +188,8 @@ static int xen_cpu_dead_hvm(unsigned int cpu)
return 0;
}
+static bool no_vector_callback __initdata;
+
static void __init xen_hvm_guest_init(void)
{
if (xen_pv_domain())
@@ -207,7 +209,7 @@ static void __init xen_hvm_guest_init(void)
xen_panic_handler_init();
- if (xen_feature(XENFEAT_hvm_callback_vector))
+ if (!no_vector_callback && xen_feature(XENFEAT_hvm_callback_vector))
xen_have_vector_callback = 1;
xen_hvm_smp_init();
@@ -233,6 +235,13 @@ static __init int xen_parse_nopv(char *arg)
}
early_param("xen_nopv", xen_parse_nopv);
+static __init int xen_parse_no_vector_callback(char *arg)
+{
+ no_vector_callback = true;
+ return 0;
+}
+early_param("xen_no_vector_callback", xen_parse_no_vector_callback);
+
bool __init xen_hvm_need_lapic(void)
{
if (xen_pv_domain())
diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c
index f5e7db4f82ab..6ff3c887e0b9 100644
--- a/arch/x86/xen/smp_hvm.c
+++ b/arch/x86/xen/smp_hvm.c
@@ -33,9 +33,11 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
int cpu;
native_smp_prepare_cpus(max_cpus);
- WARN_ON(xen_smp_intr_init(0));
- xen_init_lock_cpu(0);
+ if (xen_have_vector_callback) {
+ WARN_ON(xen_smp_intr_init(0));
+ xen_init_lock_cpu(0);
+ }
for_each_possible_cpu(cpu) {
if (cpu == 0)
@@ -50,9 +52,11 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
static void xen_hvm_cpu_die(unsigned int cpu)
{
if (common_cpu_die(cpu) == 0) {
- xen_smp_intr_free(cpu);
- xen_uninit_lock_cpu(cpu);
- xen_teardown_timer(cpu);
+ if (xen_have_vector_callback) {
+ xen_smp_intr_free(cpu);
+ xen_uninit_lock_cpu(cpu);
+ xen_teardown_timer(cpu);
+ }
}
}
#else
@@ -64,14 +68,19 @@ static void xen_hvm_cpu_die(unsigned int cpu)
void __init xen_hvm_smp_init(void)
{
- if (!xen_have_vector_callback)
+ smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
+ smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
+ smp_ops.smp_cpus_done = xen_smp_cpus_done;
+ smp_ops.cpu_die = xen_hvm_cpu_die;
+
+ if (!xen_have_vector_callback) {
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+ nopvspin = true;
+#endif
return;
+ }
- smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
- smp_ops.cpu_die = xen_hvm_cpu_die;
smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
- smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
- smp_ops.smp_cpus_done = xen_smp_cpus_done;
}
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 9718e9593564..854c5e07e867 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -2,7 +2,6 @@
generated-y += syscall_table.h
generic-y += extable.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += param.h
generic-y += qrwlock.h
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 9e81d1052091..9e4eb0fc1c16 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -6332,13 +6332,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
* limit 'something'.
*/
/* no more than 50% of tags for async I/O */
- bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
+ bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
/*
* no more than 75% of tags for sync writes (25% extra tags
* w.r.t. async I/O, to prevent async I/O from starving sync
* writes)
*/
- bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
+ bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
/*
* In-word depths in case some bfq_queue is being weight-
@@ -6348,9 +6348,9 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
* shortage.
*/
/* no more than ~18% of tags for async I/O */
- bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
+ bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
/* no more than ~37% of tags for sync writes (~20% extra tags) */
- bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
+ bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
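
The hunks above switch the shallow-depth limits from 1U << bt->sb.shift, which is only the sbitmap word size, to bt->sb.depth, the actual number of tags in the queue. A small user-space sketch of the same arithmetic, not part of the patch and using an illustrative 62-tag depth:

#include <stdio.h>

/* Same fractions as bfq_update_depths(): 50%, 75%, ~18% and ~37% of the
 * tag depth, clamped to at least one tag so a queue can always submit I/O. */
static unsigned int shallow_depth(unsigned int depth, unsigned int mul, unsigned int shift)
{
	unsigned int d = (depth * mul) >> shift;

	return d > 1 ? d : 1;
}

int main(void)
{
	unsigned int depth = 62;	/* illustrative: a tag set that is not a power of two */

	printf("async:                      %u\n", shallow_depth(depth, 1, 1));	/* 50%  -> 31 */
	printf("sync writes:                %u\n", shallow_depth(depth, 3, 2));	/* 75%  -> 46 */
	printf("async, weight-raised:       %u\n", shallow_depth(depth, 3, 4));	/* ~18% -> 11 */
	printf("sync writes, weight-raised: %u\n", shallow_depth(depth, 6, 4));	/* ~37% -> 23 */
	return 0;
}
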
diff --git a/block/blk-core.c b/block/blk-core.c
index 96e5fcd7f071..7663a9b94b80 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -18,6 +18,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
+#include <linux/blk-pm.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
@@ -424,11 +425,11 @@ EXPORT_SYMBOL(blk_cleanup_queue);
/**
* blk_queue_enter() - try to increase q->q_usage_counter
* @q: request queue pointer
- * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
+ * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
*/
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
- const bool pm = flags & BLK_MQ_REQ_PREEMPT;
+ const bool pm = flags & BLK_MQ_REQ_PM;
while (true) {
bool success = false;
@@ -440,7 +441,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
* responsible for ensuring that that counter is
* globally visible before the queue is unfrozen.
*/
- if (pm || !blk_queue_pm_only(q)) {
+ if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
+ !blk_queue_pm_only(q)) {
success = true;
} else {
percpu_ref_put(&q->q_usage_counter);
@@ -465,8 +467,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
wait_event(q->mq_freeze_wq,
(!q->mq_freeze_depth &&
- (pm || (blk_pm_request_resume(q),
- !blk_queue_pm_only(q)))) ||
+ blk_pm_resume_queue(pm, q)) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
@@ -630,7 +631,7 @@ struct request *blk_get_request(struct request_queue *q, unsigned int op,
struct request *req;
WARN_ON_ONCE(op & REQ_NOWAIT);
- WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
+ WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
req = blk_mq_alloc_request(q, op, flags);
if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index ac6078a34939..98d656bdb42b 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -2551,8 +2551,8 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
bool use_debt, ioc_locked;
unsigned long flags;
- /* bypass IOs if disabled or for root cgroup */
- if (!ioc->enabled || !iocg->level)
+ /* bypass IOs if disabled, still initializing, or for root cgroup */
+ if (!ioc->enabled || !iocg || !iocg->level)
return;
/* calculate the absolute vtime cost */
@@ -2679,14 +2679,14 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
struct bio *bio)
{
struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
- struct ioc *ioc = iocg->ioc;
+ struct ioc *ioc = rqos_to_ioc(rqos);
sector_t bio_end = bio_end_sector(bio);
struct ioc_now now;
u64 vtime, abs_cost, cost;
unsigned long flags;
- /* bypass if disabled or for root cgroup */
- if (!ioc->enabled || !iocg->level)
+ /* bypass if disabled, still initializing, or for root cgroup */
+ if (!ioc->enabled || !iocg || !iocg->level)
return;
abs_cost = calc_vtime_cost(bio, iocg, true);
@@ -2863,6 +2863,12 @@ static int blk_iocost_init(struct request_queue *q)
ioc_refresh_params(ioc, true);
spin_unlock_irq(&ioc->lock);
+ /*
+ * rqos must be added before activation to allow iocg_pd_init() to
+ * lookup the ioc from q. This means that the rqos methods may get
+ * called before policy activation completion, can't assume that the
+ * target bio has an iocg associated and need to test for NULL iocg.
+ */
rq_qos_add(q, rqos);
ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
if (ret) {
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 3094542e12ae..4de03da9a624 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -129,6 +129,7 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(PCI_P2PDMA),
QUEUE_FLAG_NAME(ZONE_RESETALL),
QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
+ QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME
@@ -245,6 +246,7 @@ static const char *const hctx_flag_name[] = {
HCTX_FLAG_NAME(BLOCKING),
HCTX_FLAG_NAME(NO_SCHED),
HCTX_FLAG_NAME(STACKING),
+ HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME
@@ -297,7 +299,6 @@ static const char *const rqf_name[] = {
RQF_NAME(MIXED_MERGE),
RQF_NAME(MQ_INFLIGHT),
RQF_NAME(DONTPREP),
- RQF_NAME(PREEMPT),
RQF_NAME(FAILED),
RQF_NAME(QUIET),
RQF_NAME(ELVPRIV),
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c338c9bc5a2c..f285a9123a8b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -294,8 +294,8 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->mq_hctx = data->hctx;
rq->rq_flags = 0;
rq->cmd_flags = data->cmd_flags;
- if (data->flags & BLK_MQ_REQ_PREEMPT)
- rq->rq_flags |= RQF_PREEMPT;
+ if (data->flags & BLK_MQ_REQ_PM)
+ rq->rq_flags |= RQF_PM;
if (blk_queue_io_stat(data->q))
rq->rq_flags |= RQF_IO_STAT;
INIT_LIST_HEAD(&rq->queuelist);
diff --git a/block/blk-pm.c b/block/blk-pm.c
index b85234d758f7..17bd020268d4 100644
--- a/block/blk-pm.c
+++ b/block/blk-pm.c
@@ -67,6 +67,10 @@ int blk_pre_runtime_suspend(struct request_queue *q)
WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
+ spin_lock_irq(&q->queue_lock);
+ q->rpm_status = RPM_SUSPENDING;
+ spin_unlock_irq(&q->queue_lock);
+
/*
* Increase the pm_only counter before checking whether any
* non-PM blk_queue_enter() calls are in progress to avoid that any
@@ -89,15 +93,14 @@ int blk_pre_runtime_suspend(struct request_queue *q)
/* Switch q_usage_counter back to per-cpu mode. */
blk_mq_unfreeze_queue(q);
- spin_lock_irq(&q->queue_lock);
- if (ret < 0)
+ if (ret < 0) {
+ spin_lock_irq(&q->queue_lock);
+ q->rpm_status = RPM_ACTIVE;
pm_runtime_mark_last_busy(q->dev);
- else
- q->rpm_status = RPM_SUSPENDING;
- spin_unlock_irq(&q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
- if (ret)
blk_clear_pm_only(q);
+ }
return ret;
}
diff --git a/block/blk-pm.h b/block/blk-pm.h
index ea5507d23e75..a2283cc9f716 100644
--- a/block/blk-pm.h
+++ b/block/blk-pm.h
@@ -6,11 +6,14 @@
#include <linux/pm_runtime.h>
#ifdef CONFIG_PM
-static inline void blk_pm_request_resume(struct request_queue *q)
+static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
{
- if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
- q->rpm_status == RPM_SUSPENDING))
- pm_request_resume(q->dev);
+ if (!q->dev || !blk_queue_pm_only(q))
+ return 1; /* Nothing to do */
+ if (pm && q->rpm_status != RPM_SUSPENDED)
+ return 1; /* Request allowed */
+ pm_request_resume(q->dev);
+ return 0;
}
static inline void blk_pm_mark_last_busy(struct request *rq)
@@ -44,8 +47,9 @@ static inline void blk_pm_put_request(struct request *rq)
--rq->q->nr_pending;
}
#else
-static inline void blk_pm_request_resume(struct request_queue *q)
+static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
{
+ return 1;
}
static inline void blk_pm_mark_last_busy(struct request *rq)
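
The new blk_pm_resume_queue() together with the blk_queue_enter() hunk above boils down to one predicate: ordinary requests pass only while the queue is not pm-only, while BLK_MQ_REQ_PM requests also pass while a suspend is still in progress, but never once the queue is fully RPM_SUSPENDED. A condensed user-space sketch of that decision, not part of the patch and leaving out the q->dev check and the pm_request_resume() kick:

#include <stdbool.h>
#include <stdio.h>

enum rpm { RPM_ACTIVE, RPM_SUSPENDING, RPM_SUSPENDED };

/* Condensed form of the gate used by blk_queue_enter()/blk_pm_resume_queue(). */
static bool may_enter(bool pm_request, bool pm_only, enum rpm status)
{
	if (!pm_only)
		return true;
	return pm_request && status != RPM_SUSPENDED;
}

int main(void)
{
	printf("%d\n", may_enter(false, false, RPM_ACTIVE));	/* 1: queue fully active */
	printf("%d\n", may_enter(false, true, RPM_SUSPENDING));	/* 0: must wait for resume */
	printf("%d\n", may_enter(true, true, RPM_SUSPENDING));	/* 1: PM request may pass */
	printf("%d\n", may_enter(true, true, RPM_SUSPENDED));	/* 0: even PM requests blocked */
	return 0;
}
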
diff --git a/block/genhd.c b/block/genhd.c
index 73faec438e49..419548e92d82 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -246,15 +246,18 @@ struct block_device *disk_part_iter_next(struct disk_part_iter *piter)
part = rcu_dereference(ptbl->part[piter->idx]);
if (!part)
continue;
+ piter->part = bdgrab(part);
+ if (!piter->part)
+ continue;
if (!bdev_nr_sectors(part) &&
!(piter->flags & DISK_PITER_INCL_EMPTY) &&
!(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
- piter->idx == 0))
+ piter->idx == 0)) {
+ bdput(piter->part);
+ piter->part = NULL;
continue;
+ }
- piter->part = bdgrab(part);
- if (!piter->part)
- continue;
piter->idx += inc;
break;
}
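
The reordering above takes the bdgrab() reference before looking at the partition size, so the partition can no longer be freed while the iterator is still inspecting it; the reference is dropped again when the entry is skipped. A rough user-space sketch of the take-reference-then-decide pattern, with made-up part_get()/part_put() helpers standing in for bdgrab()/bdput():

#include <stdio.h>

struct part { int refcount; int sectors; };

static struct part *part_get(struct part *p) { if (p) p->refcount++; return p; }
static void part_put(struct part *p)         { if (p) p->refcount--; }

/* Grab the reference first, then decide whether to skip: checking the size
 * of an object we do not yet hold races with concurrent deletion, which is
 * what the old ordering allowed. The caller owns the returned reference. */
static struct part *iter_next(struct part **tbl, int n, int *idx)
{
	while (*idx < n) {
		struct part *p = part_get(tbl[(*idx)++]);

		if (!p)
			continue;
		if (p->sectors == 0) {	/* empty partition: put it back and move on */
			part_put(p);
			continue;
		}
		return p;
	}
	return NULL;
}

int main(void)
{
	struct part a = { 0, 0 }, b = { 0, 1024 };
	struct part *tbl[] = { &a, NULL, &b };
	int idx = 0;
	struct part *p = iter_next(tbl, 3, &idx);

	printf("got part with %d sectors, refcount %d\n",
	       p ? p->sectors : -1, p ? p->refcount : 0);
	return 0;
}
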
diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c
index 511932aa94a6..0959613560b9 100644
--- a/crypto/asymmetric_keys/asym_tpm.c
+++ b/crypto/asymmetric_keys/asym_tpm.c
@@ -354,7 +354,7 @@ static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf)
memcpy(cur, e, sizeof(e));
cur += sizeof(e);
/* Zero parameters to satisfy set_pub_key ABI. */
- memset(cur, 0, SETKEY_PARAMS_SIZE);
+ memzero_explicit(cur, SETKEY_PARAMS_SIZE);
return cur - buf;
}
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 8892908ad58c..788a4ba1e2e7 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -356,7 +356,8 @@ int public_key_verify_signature(const struct public_key *pkey,
if (ret)
goto error_free_key;
- if (strcmp(sig->pkey_algo, "sm2") == 0 && sig->data_size) {
+ if (sig->pkey_algo && strcmp(sig->pkey_algo, "sm2") == 0 &&
+ sig->data_size) {
ret = cert_sig_digest_update(sig, tfm);
if (ret)
goto error_free_key;
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index d56b8603dec9..96f80c8f8e30 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -39,7 +39,8 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
struct ecdh params;
unsigned int ndigits;
- if (crypto_ecdh_decode_key(buf, len, &params) < 0)
+ if (crypto_ecdh_decode_key(buf, len, &params) < 0 ||
+ params.key_size > sizeof(ctx->private_key))
return -EINVAL;
ndigits = ecdh_supported_curve(params.curve_id);
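
The extra condition above rejects a decoded key whose key_size is larger than ctx->private_key before anything is copied into that fixed-size buffer. A minimal sketch of the same validate-before-copy pattern; the struct layout and helper name here are illustrative, not the driver's actual context:

#include <string.h>
#include <stdio.h>

struct priv_ctx {
	unsigned char private_key[32];	/* fixed-size destination, as in the ecdh ctx */
	unsigned int key_size;
};

/* Refuse the key before copying: a length larger than the destination would
 * otherwise overflow private_key[]. Returns 0 on success, -1 on error. */
static int set_private_key(struct priv_ctx *ctx, const void *key, unsigned int len)
{
	if (!key || len > sizeof(ctx->private_key))
		return -1;

	memcpy(ctx->private_key, key, len);
	ctx->key_size = len;
	return 0;
}

int main(void)
{
	struct priv_ctx ctx = { { 0 }, 0 };
	unsigned char huge[64] = { 0 };

	printf("%d\n", set_private_key(&ctx, huge, sizeof(huge)));	/* -1: rejected */
	printf("%d\n", set_private_key(&ctx, huge, 32));		/* 0: fits */
	return 0;
}
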
diff --git a/crypto/xor.c b/crypto/xor.c
index eacbf4f93990..8f899f898ec9 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -107,6 +107,8 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
preempt_enable();
// bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
+ if (!min)
+ min = 1;
speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
tmpl->speed = speed;
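
The two added lines keep the later division from becoming a divide-by-zero when the fastest measured run comes back as 0 ns (below timer resolution). A user-space sketch of the guarded conversion; the REPS and BENCH_SIZE figures plugged in below are only illustrative:

#include <stdio.h>

/* MB/s from bytes moved and elapsed nanoseconds: bytes/ns == GB/s, so
 * multiply by 1000. Clamp a zero measurement to 1 ns, as the patch does
 * for 'min', so the division cannot trap. */
static unsigned int xor_mbps(unsigned long long bytes, unsigned long long ns)
{
	if (!ns)
		ns = 1;
	return (unsigned int)(1000 * bytes / ns);
}

int main(void)
{
	unsigned long long bytes = 800ULL * 4096;	/* illustrative REPS * BENCH_SIZE */

	printf("%u MB/s\n", xor_mbps(bytes, 200000));	/* 200 us  -> 16384 MB/s */
	printf("%u MB/s\n", xor_mbps(bytes, 0));	/* clamped -> no divide-by-zero */
	return 0;
}
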
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index edf1558c1105..ebcf534514be 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -395,9 +395,6 @@ config ACPI_CONTAINER
This helps support hotplug of nodes, CPUs, and memory.
- To compile this driver as a module, choose M here:
- the module will be called container.
-
config ACPI_HOTPLUG_MEMORY
bool "Memory Hotplug"
depends on MEMORY_HOTPLUG
@@ -411,9 +408,6 @@ config ACPI_HOTPLUG_MEMORY
removing memory devices at runtime, you need not enable
this driver.
- To compile this driver as a module, choose M here:
- the module will be called acpi_memhotplug.
-
config ACPI_HOTPLUG_IOAPIC
bool
depends on PCI
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index cb229e24c563..e6a5d997241c 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -97,7 +97,7 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
extern struct list_head acpi_bus_id_list;
struct acpi_device_bus_id {
- char bus_id[15];
+ const char *bus_id;
unsigned int instance_no;
struct list_head node;
};
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 80b668c80073..1db063b02f63 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -486,6 +486,7 @@ static void acpi_device_del(struct acpi_device *device)
acpi_device_bus_id->instance_no--;
else {
list_del(&acpi_device_bus_id->node);
+ kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
}
break;
@@ -585,6 +586,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
if (!device)
return -EINVAL;
+ *device = NULL;
+
status = acpi_get_data_full(handle, acpi_scan_drop_device,
(void **)device, callback);
if (ACPI_FAILURE(status) || !*device) {
@@ -674,7 +677,14 @@ int acpi_device_add(struct acpi_device *device,
}
if (!found) {
acpi_device_bus_id = new_bus_id;
- strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
+ acpi_device_bus_id->bus_id =
+ kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
+ if (!acpi_device_bus_id->bus_id) {
+ pr_err(PREFIX "Memory allocation error for bus id\n");
+ result = -ENOMEM;
+ goto err_free_new_bus_id;
+ }
+
acpi_device_bus_id->instance_no = 0;
list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
}
@@ -709,6 +719,11 @@ int acpi_device_add(struct acpi_device *device,
if (device->parent)
list_del(&device->node);
list_del(&device->wakeup_list);
+
+ err_free_new_bus_id:
+ if (!found)
+ kfree(new_bus_id);
+
mutex_unlock(&acpi_device_lock);
err_detach:
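
With bus_id turned into a pointer, the string now has to be duplicated with kstrdup_const() and released with kfree_const(), both on the error path added above and in acpi_device_del(). A stripped-down kernel-style sketch of that pairing; the example_id structure and helpers are made up for illustration:

#include <linux/slab.h>
#include <linux/string.h>

struct example_id {
	const char *name;
};

/* kstrdup_const() shares the original string when it lives in .rodata and
 * only copies otherwise; the matching free must therefore be kfree_const(),
 * never plain kfree(). */
static struct example_id *example_id_create(const char *hid)
{
	struct example_id *id = kzalloc(sizeof(*id), GFP_KERNEL);

	if (!id)
		return NULL;

	id->name = kstrdup_const(hid, GFP_KERNEL);
	if (!id->name) {
		kfree(id);
		return NULL;
	}
	return id;
}

static void example_id_destroy(struct example_id *id)
{
	kfree_const(id->name);
	kfree(id);
}
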
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 25fea34b544c..2b69536cdccb 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -105,18 +105,8 @@ static void lpi_device_get_constraints_amd(void)
for (i = 0; i < out_obj->package.count; i++) {
union acpi_object *package = &out_obj->package.elements[i];
- struct lpi_device_info_amd info = { };
- if (package->type == ACPI_TYPE_INTEGER) {
- switch (i) {
- case 0:
- info.revision = package->integer.value;
- break;
- case 1:
- info.count = package->integer.value;
- break;
- }
- } else if (package->type == ACPI_TYPE_PACKAGE) {
+ if (package->type == ACPI_TYPE_PACKAGE) {
lpi_constraints_table = kcalloc(package->package.count,
sizeof(*lpi_constraints_table),
GFP_KERNEL);
@@ -135,12 +125,10 @@ static void lpi_device_get_constraints_amd(void)
for (k = 0; k < info_obj->package.count; ++k) {
union acpi_object *obj = &info_obj->package.elements[k];
- union acpi_object *obj_new;
list = &lpi_constraints_table[lpi_constraints_table_size];
list->min_dstate = -1;
- obj_new = &obj[k];
switch (k) {
case 0:
dev_info.enabled = obj->integer.value;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 65a3886f68c9..5f0472c18bcb 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3607,7 +3607,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
- return err;
+ goto err_out_disable_pdev;
}
card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 25e08e5f40bd..6eb4c7a904c5 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -208,6 +208,16 @@ int device_links_read_lock_held(void)
#endif
#endif /* !CONFIG_SRCU */
+static bool device_is_ancestor(struct device *dev, struct device *target)
+{
+ while (target->parent) {
+ target = target->parent;
+ if (dev == target)
+ return true;
+ }
+ return false;
+}
+
/**
* device_is_dependent - Check if one device depends on another one
* @dev: Device to check dependencies for.
@@ -221,7 +231,12 @@ int device_is_dependent(struct device *dev, void *target)
struct device_link *link;
int ret;
- if (dev == target)
+ /*
+ * The "ancestors" check is needed to catch the case when the target
+ * device has not been completely initialized yet and it is still
+ * missing from the list of children of its parent device.
+ */
+ if (dev == target || device_is_ancestor(dev, target))
return 1;
ret = device_for_each_child(dev, target, device_is_dependent);
@@ -456,7 +471,9 @@ static int devlink_add_symlinks(struct device *dev,
struct device *con = link->consumer;
char *buf;
- len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+ len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+ strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+ len += strlen(":");
len += strlen("supplier:") + 1;
buf = kzalloc(len, GFP_KERNEL);
if (!buf)
@@ -470,12 +487,12 @@ static int devlink_add_symlinks(struct device *dev,
if (ret)
goto err_con;
- snprintf(buf, len, "consumer:%s", dev_name(con));
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
if (ret)
goto err_con_dev;
- snprintf(buf, len, "supplier:%s", dev_name(sup));
+ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
if (ret)
goto err_sup_dev;
@@ -483,7 +500,7 @@ static int devlink_add_symlinks(struct device *dev,
goto out;
err_sup_dev:
- snprintf(buf, len, "consumer:%s", dev_name(con));
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
sysfs_remove_link(&link->link_dev.kobj, "consumer");
@@ -506,7 +523,9 @@ static void devlink_remove_symlinks(struct device *dev,
sysfs_remove_link(&link->link_dev.kobj, "consumer");
sysfs_remove_link(&link->link_dev.kobj, "supplier");
- len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+ len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+ strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+ len += strlen(":");
len += strlen("supplier:") + 1;
buf = kzalloc(len, GFP_KERNEL);
if (!buf) {
@@ -514,9 +533,9 @@ static void devlink_remove_symlinks(struct device *dev,
return;
}
- snprintf(buf, len, "supplier:%s", dev_name(sup));
+ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
sysfs_remove_link(&con->kobj, buf);
- snprintf(buf, len, "consumer:%s", dev_name(con));
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
kfree(buf);
}
@@ -737,8 +756,9 @@ struct device_link *device_link_add(struct device *consumer,
link->link_dev.class = &devlink_class;
device_set_pm_not_required(&link->link_dev);
- dev_set_name(&link->link_dev, "%s--%s",
- dev_name(supplier), dev_name(consumer));
+ dev_set_name(&link->link_dev, "%s:%s--%s:%s",
+ dev_bus_name(supplier), dev_name(supplier),
+ dev_bus_name(consumer), dev_name(consumer));
if (device_register(&link->link_dev)) {
put_device(consumer);
put_device(supplier);
@@ -1808,9 +1828,7 @@ const char *dev_driver_string(const struct device *dev)
* never change once they are set, so they don't need special care.
*/
drv = READ_ONCE(dev->driver);
- return drv ? drv->name :
- (dev->bus ? dev->bus->name :
- (dev->class ? dev->class->name : ""));
+ return drv ? drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);
@@ -4414,6 +4432,12 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
*
* Set the device's firmware node pointer to @fwnode, but if a secondary
* firmware node of the device is present, preserve it.
+ *
+ * Valid fwnode cases are:
+ * - primary --> secondary --> -ENODEV
+ * - primary --> NULL
+ * - secondary --> -ENODEV
+ * - NULL
*/
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
@@ -4432,8 +4456,9 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
} else {
if (fwnode_is_primary(fn)) {
dev->fwnode = fn->secondary;
+ /* Set fn->secondary = NULL, so fn remains the primary fwnode */
if (!(parent && fn == parent->fwnode))
- fn->secondary = ERR_PTR(-ENODEV);
+ fn->secondary = NULL;
} else {
dev->fwnode = NULL;
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 2f32f38a11ed..9179825ff646 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -371,13 +371,6 @@ static void driver_bound(struct device *dev)
device_pm_check_callbacks(dev);
/*
- * Reorder successfully probed devices to the end of the device list.
- * This ensures that suspend/resume order matches probe order, which
- * is usually what drivers rely on.
- */
- device_pm_move_to_tail(dev);
-
- /*
* Make sure the device is no longer in one of the deferred lists and
* kick off retrying all pending devices
*/
@@ -619,6 +612,8 @@ dev_groups_failed:
else if (drv->remove)
drv->remove(dev);
probe_failed:
+ kfree(dev->dma_range_map);
+ dev->dma_range_map = NULL;
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 95fd1549f87d..8456d8384ac8 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -366,6 +366,8 @@ int devm_platform_get_irqs_affinity(struct platform_device *dev,
return -ERANGE;
nvec = platform_irq_count(dev);
+ if (nvec < 0)
+ return nvec;
if (nvec < minvec)
return -ENOSPC;
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 8dfac7f3ed7a..ff2ee87987c7 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -582,8 +582,12 @@ void regmap_debugfs_init(struct regmap *map)
devname = dev_name(map->dev);
if (name) {
- map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+ if (!map->debugfs_name) {
+ map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
devname, name);
+ if (!map->debugfs_name)
+ return;
+ }
name = map->debugfs_name;
} else {
name = devname;
@@ -591,9 +595,10 @@ void regmap_debugfs_init(struct regmap *map)
if (!strcmp(name, "dummy")) {
kfree(map->debugfs_name);
-
map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
dummy_index);
+ if (!map->debugfs_name)
+ return;
name = map->debugfs_name;
dummy_index++;
}
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 262326973ee0..583b671b1d2d 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -445,6 +445,7 @@ config BLK_DEV_RBD
config BLK_DEV_RSXX
tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
depends on PCI
+ select CRC32
help
Device driver for IBM's high speed PCIe SSD
storage device: Flash Adapter 900GB Full Height.
diff --git a/drivers/block/rnbd/Kconfig b/drivers/block/rnbd/Kconfig
index 4b6d3d816d1f..2ff05a0d2646 100644
--- a/drivers/block/rnbd/Kconfig
+++ b/drivers/block/rnbd/Kconfig
@@ -7,6 +7,7 @@ config BLK_DEV_RNBD_CLIENT
tristate "RDMA Network Block Device driver client"
depends on INFINIBAND_RTRS_CLIENT
select BLK_DEV_RNBD
+ select SG_POOL
help
RNBD client is a network block device driver using rdma transport.
diff --git a/drivers/block/rnbd/README b/drivers/block/rnbd/README
index 1773c0aa0bd4..080f58a5400a 100644
--- a/drivers/block/rnbd/README
+++ b/drivers/block/rnbd/README
@@ -90,3 +90,4 @@ Kleber Souza <[email protected]>
Lutz Pogrell <[email protected]>
Milind Dumbare <[email protected]>
Roman Penyaev <[email protected]>
+Swapnil Ingle <[email protected]>
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 96e3f9fe8241..45a470076652 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -375,12 +375,19 @@ static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
init_waitqueue_head(&iu->comp.wait);
iu->comp.errno = INT_MAX;
+ if (sg_alloc_table(&iu->sgt, 1, GFP_KERNEL)) {
+ rnbd_put_permit(sess, permit);
+ kfree(iu);
+ return NULL;
+ }
+
return iu;
}
static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
{
if (atomic_dec_and_test(&iu->refcount)) {
+ sg_free_table(&iu->sgt);
rnbd_put_permit(sess, iu->permit);
kfree(iu);
}
@@ -487,8 +494,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
iu->buf = NULL;
iu->dev = dev;
- sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
-
msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
msg.device_id = cpu_to_le32(device_id);
@@ -502,7 +507,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
err = errno;
}
- sg_free_table(&iu->sgt);
rnbd_put_iu(sess, iu);
return err;
}
@@ -575,7 +579,6 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
iu->buf = rsp;
iu->dev = dev;
- sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
@@ -594,7 +597,6 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
err = errno;
}
- sg_free_table(&iu->sgt);
rnbd_put_iu(sess, iu);
return err;
}
@@ -622,8 +624,6 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
iu->buf = rsp;
iu->sess = sess;
-
- sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
@@ -650,7 +650,6 @@ put_iu:
} else {
err = errno;
}
- sg_free_table(&iu->sgt);
rnbd_put_iu(sess, iu);
return err;
}
@@ -1698,7 +1697,8 @@ static void rnbd_destroy_sessions(void)
*/
list_for_each_entry_safe(sess, sn, &sess_list, list) {
- WARN_ON(!rnbd_clt_get_sess(sess));
+ if (!rnbd_clt_get_sess(sess))
+ continue;
close_rtrs(sess);
list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
/*
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index b8e44331e494..a6a68d44f517 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -338,10 +338,12 @@ static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
{
- mutex_lock(&sess_dev->sess->lock);
- rnbd_srv_destroy_dev_session_sysfs(sess_dev);
- mutex_unlock(&sess_dev->sess->lock);
+ struct rnbd_srv_session *sess = sess_dev->sess;
+
sess_dev->keep_id = true;
+ mutex_lock(&sess->lock);
+ rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+ mutex_unlock(&sess->lock);
}
static int process_msg_close(struct rtrs_srv *rtrs,
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 37244a7e68c2..9cf249c344d9 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1256,6 +1256,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA30_CLK_HDA, TEGRA30_CLK_PLL_P, 102000000, 0 },
+ { TEGRA30_CLK_HDA2CODEC_2X, TEGRA30_CLK_PLL_P, 48000000, 0 },
/* must be the last entry */
{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index a60aee1a1a29..65df9ef5b5bc 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -235,36 +235,6 @@ static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
return len;
}
-static ssize_t ti_eqep_position_floor_read(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, char *buf)
-{
- struct ti_eqep_cnt *priv = counter->priv;
- u32 qposinit;
-
- regmap_read(priv->regmap32, QPOSINIT, &qposinit);
-
- return sprintf(buf, "%u\n", qposinit);
-}
-
-static ssize_t ti_eqep_position_floor_write(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, const char *buf,
- size_t len)
-{
- struct ti_eqep_cnt *priv = counter->priv;
- int err;
- u32 res;
-
- err = kstrtouint(buf, 0, &res);
- if (err < 0)
- return err;
-
- regmap_write(priv->regmap32, QPOSINIT, res);
-
- return len;
-}
-
static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
struct counter_count *count,
void *ext_priv, char *buf)
@@ -302,11 +272,6 @@ static struct counter_count_ext ti_eqep_position_ext[] = {
.write = ti_eqep_position_ceiling_write,
},
{
- .name = "floor",
- .read = ti_eqep_position_floor_read,
- .write = ti_eqep_position_floor_write,
- },
- {
.name = "enable",
.read = ti_eqep_position_enable_read,
.write = ti_eqep_position_enable_write,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6e23376548ce..be05e038d956 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -76,11 +76,6 @@ static inline int ceiling_fp(int32_t x)
return ret;
}
-static inline int32_t percent_fp(int percent)
-{
- return div_fp(percent, 100);
-}
-
static inline u64 mul_ext_fp(u64 x, u64 y)
{
return (x * y) >> EXT_FRAC_BITS;
@@ -91,11 +86,6 @@ static inline u64 div_ext_fp(u64 x, u64 y)
return div64_u64(x << EXT_FRAC_BITS, y);
}
-static inline int32_t percent_ext_fp(int percent)
-{
- return div_ext_fp(percent, 100);
-}
-
/**
* struct sample - Store performance sample
* @core_avg_perf: Ratio of APERF/MPERF which is the actual average
@@ -2653,12 +2643,13 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
unsigned long capacity)
{
struct cpudata *cpu = all_cpu_data[cpunum];
+ u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
int old_pstate = cpu->pstate.current_pstate;
int cap_pstate, min_pstate, max_pstate, target_pstate;
update_turbo_state();
- cap_pstate = global.turbo_disabled ? cpu->pstate.max_pstate :
- cpu->pstate.turbo_pstate;
+ cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
+ HWP_HIGHEST_PERF(hwp_cap);
/* Optimization: Avoid unnecessary divisions. */
@@ -3086,7 +3077,6 @@ static int __init intel_pstate_init(void)
intel_pstate.attr = hwp_cpufreq_attrs;
intel_cpufreq.attr = hwp_cpufreq_attrs;
intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
- intel_cpufreq.fast_switch = NULL;
intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
if (!default_driver)
default_driver = &intel_pstate;
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 0acc9e241cd7..b9ccb6a3dad9 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -878,9 +878,9 @@ static int get_transition_latency(struct powernow_k8_data *data)
/* Take a frequency, and issue the fid/vid transition command */
static int transition_frequency_fidvid(struct powernow_k8_data *data,
- unsigned int index)
+ unsigned int index,
+ struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy;
u32 fid = 0;
u32 vid = 0;
int res;
@@ -912,9 +912,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
freqs.old = find_khz_freq_from_fid(data->currfid);
freqs.new = find_khz_freq_from_fid(fid);
- policy = cpufreq_cpu_get(smp_processor_id());
- cpufreq_cpu_put(policy);
-
cpufreq_freq_transition_begin(policy, &freqs);
res = transition_fid_vid(data, fid, vid);
cpufreq_freq_transition_end(policy, &freqs, res);
@@ -969,7 +966,7 @@ static long powernowk8_target_fn(void *arg)
powernow_k8_acpi_pst_values(data, newstate);
- ret = transition_frequency_fidvid(data, newstate);
+ ret = transition_frequency_fidvid(data, newstate, pol);
if (ret) {
pr_err("transition frequency failed\n");
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index bbd51703e738..e535f28a8028 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -366,6 +366,7 @@ if CRYPTO_DEV_OMAP
config CRYPTO_DEV_OMAP_SHAM
tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
depends on ARCH_OMAP2PLUS
+ select CRYPTO_ENGINE
select CRYPTO_SHA1
select CRYPTO_MD5
select CRYPTO_SHA256
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 4f8224a6ac95..4e16c71c24b7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -50,6 +50,14 @@ config DMABUF_MOVE_NOTIFY
This is marked experimental because we don't yet have a consistent
execution context and memory management between drivers.
+config DMABUF_DEBUG
+ bool "DMA-BUF debug checks"
+ default y if DMA_API_DEBUG
+ help
+ This option enables additional checks for DMA-BUF importers and
+ exporters. Specifically it validates that importers do not peek at the
+ underlying struct page when they import a buffer.
+
config DMABUF_SELFTESTS
tristate "Selftests for the dma-buf interfaces"
default n
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index e63684d4cd90..f264b70c383e 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -76,10 +76,6 @@ static void dma_buf_release(struct dentry *dentry)
dmabuf->ops->release(dmabuf);
- mutex_lock(&db_list.lock);
- list_del(&dmabuf->list_node);
- mutex_unlock(&db_list.lock);
-
if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
dma_resv_fini(dmabuf->resv);
@@ -88,6 +84,22 @@ static void dma_buf_release(struct dentry *dentry)
kfree(dmabuf);
}
+static int dma_buf_file_release(struct inode *inode, struct file *file)
+{
+ struct dma_buf *dmabuf;
+
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ dmabuf = file->private_data;
+
+ mutex_lock(&db_list.lock);
+ list_del(&dmabuf->list_node);
+ mutex_unlock(&db_list.lock);
+
+ return 0;
+}
+
static const struct dentry_operations dma_buf_dentry_ops = {
.d_dname = dmabuffs_dname,
.d_release = dma_buf_release,
@@ -413,6 +425,7 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
}
static const struct file_operations dma_buf_fops = {
+ .release = dma_buf_file_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
@@ -480,7 +493,7 @@ err_alloc_file:
*
* 4. Once a driver is done with a shared buffer it needs to call
* dma_buf_detach() (after cleaning up any mappings) and then release the
- * reference acquired with dma_buf_get by calling dma_buf_put().
+ * reference acquired with dma_buf_get() by calling dma_buf_put().
*
* For the detailed semantics exporters are expected to implement see
* &dma_buf_ops.
@@ -496,9 +509,10 @@ err_alloc_file:
* by the exporter. see &struct dma_buf_export_info
* for further details.
*
- * Returns, on success, a newly created dma_buf object, which wraps the
- * supplied private data and operations for dma_buf_ops. On either missing
- * ops, or error in allocating struct dma_buf, will return negative error.
+ * Returns, on success, a newly created struct dma_buf object, which wraps the
+ * supplied private data and operations for struct dma_buf_ops. On either
+ * missing ops, or error in allocating struct dma_buf, will return negative
+ * error.
*
* For most cases the easiest way to create @exp_info is through the
* %DEFINE_DMA_BUF_EXPORT_INFO macro.
@@ -584,7 +598,7 @@ err_module:
EXPORT_SYMBOL_GPL(dma_buf_export);
/**
- * dma_buf_fd - returns a file descriptor for the given dma_buf
+ * dma_buf_fd - returns a file descriptor for the given struct dma_buf
* @dmabuf: [in] pointer to dma_buf for which fd is required.
* @flags: [in] flags to give to fd
*
@@ -608,10 +622,10 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags)
EXPORT_SYMBOL_GPL(dma_buf_fd);
/**
- * dma_buf_get - returns the dma_buf structure related to an fd
- * @fd: [in] fd associated with the dma_buf to be returned
+ * dma_buf_get - returns the struct dma_buf related to an fd
+ * @fd: [in] fd associated with the struct dma_buf to be returned
*
- * On success, returns the dma_buf structure associated with an fd; uses
+ * On success, returns the struct dma_buf associated with an fd; uses
* file's refcounting done by fget to increase refcount. returns ERR_PTR
* otherwise.
*/
@@ -652,9 +666,36 @@ void dma_buf_put(struct dma_buf *dmabuf)
}
EXPORT_SYMBOL_GPL(dma_buf_put);
+static void mangle_sg_table(struct sg_table *sg_table)
+{
+#ifdef CONFIG_DMABUF_DEBUG
+ int i;
+ struct scatterlist *sg;
+
+ /* To catch abuse of the underlying struct page by importers mix
+ * up the bits, but take care to preserve the low SG_ bits to
+ * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
+ * before passing the sgt back to the exporter. */
+ for_each_sgtable_sg(sg_table, sg, i)
+ sg->page_link ^= ~0xffUL;
+#endif
+
+}
+static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct sg_table *sg_table;
+
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+
+ if (!IS_ERR_OR_NULL(sg_table))
+ mangle_sg_table(sg_table);
+
+ return sg_table;
+}
+
/**
- * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
- * calls attach() of dma_buf_ops to allow device-specific attach functionality
+ * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
* @importer_ops: [in] importer operations for the attachment
@@ -663,6 +704,9 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach().
*
+ * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
+ * functionality.
+ *
* Returns:
*
* A pointer to newly created &dma_buf_attachment on success, or a negative
@@ -721,7 +765,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
goto err_unlock;
}
- sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
+ sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
if (!sgt)
sgt = ERR_PTR(-ENOMEM);
if (IS_ERR(sgt)) {
@@ -768,13 +812,24 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
+static void __unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table,
+ enum dma_data_direction direction)
+{
+ /* uses XOR, hence this unmangles */
+ mangle_sg_table(sg_table);
+
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+}
+
/**
- * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
- * optionally calls detach() of dma_buf_ops for device-specific detach
+ * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
* @dmabuf: [in] buffer to detach from.
* @attach: [in] attachment to be detached; is free'd after this call.
*
* Clean up a device attachment obtained by calling dma_buf_attach().
+ *
+ * Optionally this calls &dma_buf_ops.detach for device-specific detach.
*/
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
@@ -785,7 +840,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_lock(attach->dmabuf->resv, NULL);
- dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
+ __unmap_dma_buf(attach, attach->sgt, attach->dir);
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_buf_unpin(attach);
@@ -805,9 +860,15 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
/**
* dma_buf_pin - Lock down the DMA-buf
- *
* @attach: [in] attachment which should be pinned
*
+ * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
+ * call this, and only for limited use cases like scanout and not for temporary
+ * pin operations. It is not permitted to allow userspace to pin arbitrary
+ * amounts of buffers through this interface.
+ *
+ * Buffers must be unpinned by calling dma_buf_unpin().
+ *
* Returns:
* 0 on success, negative error code on failure.
*/
@@ -816,6 +877,8 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
struct dma_buf *dmabuf = attach->dmabuf;
int ret = 0;
+ WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+
dma_resv_assert_held(dmabuf->resv);
if (dmabuf->ops->pin)
@@ -826,14 +889,19 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
EXPORT_SYMBOL_GPL(dma_buf_pin);
/**
- * dma_buf_unpin - Remove lock from DMA-buf
- *
+ * dma_buf_unpin - Unpin a DMA-buf
* @attach: [in] attachment which should be unpinned
+ *
+ * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
+ * any mapping of @attach again and inform the importer through
+ * &dma_buf_attach_ops.move_notify.
*/
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
struct dma_buf *dmabuf = attach->dmabuf;
+ WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+
dma_resv_assert_held(dmabuf->resv);
if (dmabuf->ops->unpin)
@@ -894,7 +962,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
}
}
- sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ sg_table = __map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
@@ -957,7 +1025,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_assert_held(attach->dmabuf->resv);
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+ __unmap_dma_buf(attach, sg_table, direction);
if (dma_buf_is_dynamic(attach->dmabuf) &&
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
@@ -1001,15 +1069,15 @@ EXPORT_SYMBOL_GPL(dma_buf_move_notify);
* vmalloc space might be limited and result in vmap calls failing.
*
* Interfaces::
+ *
* void \*dma_buf_vmap(struct dma_buf \*dmabuf)
* void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
*
* The vmap call can fail if there is no vmap support in the exporter, or if
- * it runs out of vmalloc space. Fallback to kmap should be implemented. Note
- * that the dma-buf layer keeps a reference count for all vmap access and
- * calls down into the exporter's vmap function only when no vmapping exists,
- * and only unmaps it once. Protection against concurrent vmap/vunmap calls is
- * provided by taking the dma_buf->lock mutex.
+ * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
+ * count for all vmap access and calls down into the exporter's vmap function
+ * only when no vmapping exists, and only unmaps it once. Protection against
+ * concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
*
* - For full compatibility on the importer side with existing userspace
* interfaces, which might already support mmap'ing buffers. This is needed in
@@ -1061,11 +1129,12 @@ EXPORT_SYMBOL_GPL(dma_buf_move_notify);
* shootdowns would increase the complexity quite a bit.
*
* Interface::
+ *
* int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
* unsigned long);
*
* If the importing subsystem simply provides a special-purpose mmap call to
- * set up a mapping in userspace, calling do_mmap with dma_buf->file will
+ * set up a mapping in userspace, calling do_mmap with &dma_buf.file will
* equally achieve that for a dma-buf object.
*/
@@ -1098,6 +1167,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
* dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
* it guaranteed to be coherent with other DMA access.
*
+ * This function will also wait for any DMA transactions tracked through
+ * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
+ * synchronization this function will only ensure cache coherency, callers must
+ * ensure synchronization with such DMA transactions on their own.
+ *
* Can return negative error values, returns 0 on success.
*/
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
@@ -1108,6 +1182,8 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
if (WARN_ON(!dmabuf))
return -EINVAL;
+ might_lock(&dmabuf->resv->lock.base);
+
if (dmabuf->ops->begin_cpu_access)
ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
@@ -1141,6 +1217,8 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
WARN_ON(!dmabuf);
+ might_lock(&dmabuf->resv->lock.base);
+
if (dmabuf->ops->end_cpu_access)
ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
@@ -1199,7 +1277,10 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
* This call may fail due to lack of virtual mapping address space.
* These calls are optional in drivers. The intended use for them
* is for mapping objects linear in kernel space for high use objects.
- * Please attempt to use kmap/kunmap before thinking about these interfaces.
+ *
+ * To ensure coherency users must call dma_buf_begin_cpu_access() and
+ * dma_buf_end_cpu_access() around any cpu access performed through this
+ * mapping.
*
* Returns 0 on success, or a negative errno code otherwise.
*/
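
The mangle_sg_table()/__unmap_dma_buf() hunks earlier in this file rely on XOR with ~0xffUL being its own inverse and on the mask leaving the low SG_ flag bits untouched. A tiny user-space check of those two properties, using a made-up page_link value:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long page_link = 0x12345000UL | 0x2UL;	/* fake page pointer plus a low flag bit */
	unsigned long mangled = page_link ^ ~0xffUL;

	assert((mangled & 0xffUL) == (page_link & 0xffUL));	/* SG_ flag bits preserved */
	assert((mangled ^ ~0xffUL) == page_link);		/* XOR again restores the pointer */
	printf("orig %#lx mangled %#lx\n", page_link, mangled);
	return 0;
}
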
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 3c4e34301172..364fc2f3e499 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -251,6 +251,9 @@ static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
buffer->vaddr = NULL;
}
+ /* free page list */
+ kfree(buffer->pages);
+ /* release memory */
cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
kfree(buffer);
}
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index b971505b8715..08d71dafa001 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -86,12 +86,12 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
if (desc->chunk) {
/* Create and add new element into the linked list */
- desc->chunks_alloc++;
- list_add_tail(&chunk->list, &desc->chunk->list);
if (!dw_edma_alloc_burst(chunk)) {
kfree(chunk);
return NULL;
}
+ desc->chunks_alloc++;
+ list_add_tail(&chunk->list, &desc->chunk->list);
} else {
/* List head */
chunk->burst = NULL;
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 266423a2cabc..4dbb03c545e4 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -434,7 +434,7 @@ int idxd_register_driver(void)
return 0;
drv_fail:
- for (; i > 0; i--)
+ while (--i >= 0)
driver_unregister(&idxd_drvs[i]->drv);
return rc;
}
@@ -1840,7 +1840,7 @@ int idxd_register_bus_type(void)
return 0;
bus_err:
- for (; i > 0; i--)
+ while (--i >= 0)
bus_unregister(idxd_bus_types[i]);
return rc;
}
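
Both hunks replace a partial-unwind loop that started at the entry that had just failed and never reached index 0. A small user-space sketch of the corrected while (--i >= 0) idiom; the register/unregister helpers are stand-ins:

#include <stdio.h>

#define N 3

static int register_one(int i)    { printf("register %d\n", i); return i == 2 ? -1 : 0; }
static void unregister_one(int i) { printf("unregister %d\n", i); }

/* If registration of entry i fails, only entries 0..i-1 were registered, so
 * the unwind must start at i-1 and include index 0. That is exactly what
 * while (--i >= 0) does, while the old for (; i > 0; i--) touched the failed
 * entry and skipped entry 0. */
int main(void)
{
	int i, rc = 0;

	for (i = 0; i < N; i++) {
		rc = register_one(i);
		if (rc)
			break;
	}
	if (rc)
		while (--i >= 0)
			unregister_one(i);
	return rc ? 1 : 0;
}
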
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index f133ae8dece1..6ad8afbb95f2 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -1007,6 +1007,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
return 0;
err_free:
+ mtk_hsdma_hw_deinit(hsdma);
of_dma_controller_free(pdev->dev.of_node);
err_unregister:
dma_async_device_unregister(dd);
diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
index 584c931e807a..d29d01e730aa 100644
--- a/drivers/dma/milbeaut-xdmac.c
+++ b/drivers/dma/milbeaut-xdmac.c
@@ -350,7 +350,7 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
ret = dma_async_device_register(ddev);
if (ret)
- return ret;
+ goto disable_xdmac;
ret = of_dma_controller_register(dev->of_node,
of_dma_simple_xlate, mdev);
@@ -363,6 +363,8 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
unregister_dmac:
dma_async_device_unregister(ddev);
+disable_xdmac:
+ disable_xdmac(mdev);
return ret;
}
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index d5773d474d8f..88579857ca1d 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -630,7 +630,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
GFP_NOWAIT);
if (!async_desc)
- goto err_out;
+ return NULL;
if (flags & DMA_PREP_FENCE)
async_desc->flags |= DESC_FLAG_NWD;
@@ -670,10 +670,6 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
}
return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
-
-err_out:
- kfree(async_desc);
- return NULL;
}
/**
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index d2334f535de2..1a0bf6b0567a 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1416,7 +1416,7 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
len = 1 << bit;
ring->alloc_size = (len + (len - 1));
dev_dbg(gpii->gpi_dev->dev,
- "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
+ "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%zu\n",
elements, el_size, (elements * el_size), len,
ring->alloc_size);
@@ -1424,7 +1424,7 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
ring->alloc_size,
&ring->dma_handle, GFP_KERNEL);
if (!ring->pre_aligned) {
- dev_err(gpii->gpi_dev->dev, "could not alloc size:%lu mem for ring\n",
+ dev_err(gpii->gpi_dev->dev, "could not alloc size:%zu mem for ring\n",
ring->alloc_size);
return -ENOMEM;
}
@@ -1444,8 +1444,8 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
smp_wmb();
dev_dbg(gpii->gpi_dev->dev,
- "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
- ring->dma_handle, ring->phys_addr, ring->len,
+ "phy_pre:%pad phy_alig:%pa len:%u el_size:%u elements:%u\n",
+ &ring->dma_handle, &ring->phys_addr, ring->len,
ring->el_size, ring->elements);
return 0;
@@ -1948,7 +1948,7 @@ static int gpi_ch_init(struct gchan *gchan)
return ret;
error_start_chan:
- for (i = i - 1; i >= 0; i++) {
+ for (i = i - 1; i >= 0; i--) {
gpi_stop_chan(&gpii->gchan[i]);
gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET);
}
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index e4637ec786d3..36ba8b43e78d 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -199,7 +199,7 @@
#define STM32_MDMA_MAX_CHANNELS 63
#define STM32_MDMA_MAX_REQUESTS 256
#define STM32_MDMA_MAX_BURST 128
-#define STM32_MDMA_VERY_HIGH_PRIORITY 0x11
+#define STM32_MDMA_VERY_HIGH_PRIORITY 0x3
enum stm32_mdma_trigger_mode {
STM32_MDMA_BUFFER,
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 87157cbae1b8..298460438bb4 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4698,9 +4698,9 @@ static int pktdma_setup_resources(struct udma_dev *ud)
ud->tchan_tpl.levels = 1;
}
- ud->tchan_tpl.levels = ud->tchan_tpl.levels;
- ud->tchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
- ud->tchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
+ ud->rchan_tpl.levels = ud->tchan_tpl.levels;
+ ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
+ ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
sizeof(unsigned long), GFP_KERNEL);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 22faea653ea8..79777550a6ff 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2781,7 +2781,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
has_dre = false;
if (!has_dre)
- xdev->common.copy_align = fls(width - 1);
+ xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
@@ -2900,7 +2900,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
struct device_node *node)
{
- int ret, i, nr_channels = 1;
+ int ret, i;
+ u32 nr_channels = 1;
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
@@ -3112,7 +3113,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
/* Register the DMA engine with the core */
- dma_async_device_register(&xdev->common);
+ err = dma_async_device_register(&xdev->common);
+ if (err) {
+ dev_err(xdev->dev, "failed to register the dma device\n");
+ goto error;
+ }
err = of_dma_controller_register(node, of_dma_xilinx_xlate,
xdev);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index c70f46e80a3b..dea65d85594f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -521,7 +521,8 @@ config GPIO_SAMA5D2_PIOBU
config GPIO_SIFIVE
bool "SiFive GPIO support"
- depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY
+ depends on OF_GPIO
+ select IRQ_DOMAIN_HIERARCHY
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
select REGMAP_MMIO
@@ -597,6 +598,8 @@ config GPIO_TEGRA
default ARCH_TEGRA
depends on ARCH_TEGRA || COMPILE_TEST
depends on OF_GPIO
+ select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
help
Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 672681a976f5..a912a8fed197 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -676,20 +676,17 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
else
state->duty_cycle = 1;
+ val = (unsigned long long) u; /* on duration */
regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), &u);
- val = (unsigned long long) u * NSEC_PER_SEC;
+ val += (unsigned long long) u; /* period = on + off duration */
+ val *= NSEC_PER_SEC;
do_div(val, mvpwm->clk_rate);
- if (val < state->duty_cycle) {
+ if (val > UINT_MAX)
+ state->period = UINT_MAX;
+ else if (val)
+ state->period = val;
+ else
state->period = 1;
- } else {
- val -= state->duty_cycle;
- if (val > UINT_MAX)
- state->period = UINT_MAX;
- else if (val)
- state->period = val;
- else
- state->period = 1;
- }
regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
if (u)
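
After this change the PWM period is the on-duration plus the off-duration converted to nanoseconds, instead of being reconstructed from the off-time and the already-rounded duty cycle. A user-space sketch of the new arithmetic with an illustrative 25 MHz clock:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned long long clk_rate = 25000000;	/* 25 MHz, illustrative */
	unsigned int on = 1250, off = 3750;	/* ticks read from the two blink registers */

	unsigned long long duty_ns = on * NSEC_PER_SEC / clk_rate;
	unsigned long long period_ns = (on + (unsigned long long)off) * NSEC_PER_SEC / clk_rate;

	/* prints: duty 50000 ns, period 200000 ns */
	printf("duty %llu ns, period %llu ns\n", duty_ns, period_ns);
	return 0;
}
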
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 12b679ca552c..1a7b51163528 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1979,6 +1979,21 @@ struct gpio_chardev_data {
#endif
};
+static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
+{
+ struct gpio_device *gdev = cdev->gdev;
+ struct gpiochip_info chipinfo;
+
+ memset(&chipinfo, 0, sizeof(chipinfo));
+
+ strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
+ strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
+ chipinfo.lines = gdev->ngpio;
+ if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
+ return -EFAULT;
+ return 0;
+}
+
#ifdef CONFIG_GPIO_CDEV_V1
/*
* returns 0 if the versions match, else the previously selected ABI version
@@ -1993,6 +2008,41 @@ static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
return abiv;
}
+
+static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
+ bool watch)
+{
+ struct gpio_desc *desc;
+ struct gpioline_info lineinfo;
+ struct gpio_v2_line_info lineinfo_v2;
+
+ if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+ return -EFAULT;
+
+ /* this doubles as a range check on line_offset */
+ desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ if (watch) {
+ if (lineinfo_ensure_abi_version(cdev, 1))
+ return -EPERM;
+
+ if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
+ return -EBUSY;
+ }
+
+ gpio_desc_to_lineinfo(desc, &lineinfo_v2);
+ gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
+
+ if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
+ if (watch)
+ clear_bit(lineinfo.line_offset, cdev->watched_lines);
+ return -EFAULT;
+ }
+
+ return 0;
+}
#endif
static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
@@ -2030,6 +2080,22 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
return 0;
}
+static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
+{
+ __u32 offset;
+
+ if (copy_from_user(&offset, ip, sizeof(offset)))
+ return -EFAULT;
+
+ if (offset >= cdev->gdev->ngpio)
+ return -EINVAL;
+
+ if (!test_and_clear_bit(offset, cdev->watched_lines))
+ return -EBUSY;
+
+ return 0;
+}
+
/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
@@ -2037,80 +2103,24 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_device *gdev = cdev->gdev;
- struct gpio_chip *gc = gdev->chip;
void __user *ip = (void __user *)arg;
- __u32 offset;
/* We fail any subsequent ioctl():s when the chip is gone */
- if (!gc)
+ if (!gdev->chip)
return -ENODEV;
/* Fill in the struct and pass to userspace */
if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
- struct gpiochip_info chipinfo;
-
- memset(&chipinfo, 0, sizeof(chipinfo));
-
- strscpy(chipinfo.name, dev_name(&gdev->dev),
- sizeof(chipinfo.name));
- strscpy(chipinfo.label, gdev->label,
- sizeof(chipinfo.label));
- chipinfo.lines = gdev->ngpio;
- if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
- return -EFAULT;
- return 0;
+ return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
- } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
- struct gpio_desc *desc;
- struct gpioline_info lineinfo;
- struct gpio_v2_line_info lineinfo_v2;
-
- if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
- return -EFAULT;
-
- /* this doubles as a range check on line_offset */
- desc = gpiochip_get_desc(gc, lineinfo.line_offset);
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- gpio_desc_to_lineinfo(desc, &lineinfo_v2);
- gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
- if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
- return -EFAULT;
- return 0;
} else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
return linehandle_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
return lineevent_create(gdev, ip);
- } else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
- struct gpio_desc *desc;
- struct gpioline_info lineinfo;
- struct gpio_v2_line_info lineinfo_v2;
-
- if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
- return -EFAULT;
-
- /* this doubles as a range check on line_offset */
- desc = gpiochip_get_desc(gc, lineinfo.line_offset);
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- if (lineinfo_ensure_abi_version(cdev, 1))
- return -EPERM;
-
- if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
- return -EBUSY;
-
- gpio_desc_to_lineinfo(desc, &lineinfo_v2);
- gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
- if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
- clear_bit(lineinfo.line_offset, cdev->watched_lines);
- return -EFAULT;
- }
-
- return 0;
+ } else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
+ cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+ return lineinfo_get_v1(cdev, ip,
+ cmd == GPIO_GET_LINEINFO_WATCH_IOCTL);
#endif /* CONFIG_GPIO_CDEV_V1 */
} else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL ||
cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) {
@@ -2119,16 +2129,7 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} else if (cmd == GPIO_V2_GET_LINE_IOCTL) {
return linereq_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
- if (copy_from_user(&offset, ip, sizeof(offset)))
- return -EFAULT;
-
- if (offset >= cdev->gdev->ngpio)
- return -EINVAL;
-
- if (!test_and_clear_bit(offset, cdev->watched_lines))
- return -EBUSY;
-
- return 0;
+ return lineinfo_unwatch(cdev, ip);
}
return -EINVAL;
}
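A minimal userspace sketch (not part of the patch) of the chardev uapi the refactored handlers above continue to serve; the chip path /dev/gpiochip0 is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>

int main(void)
{
	struct gpiochip_info info;
	int fd = open("/dev/gpiochip0", O_RDWR);	/* assumed chip path */

	if (fd < 0)
		return 1;
	/* handled by chipinfo_get() after this refactor */
	if (ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &info) == 0)
		printf("%s (%s): %u lines\n", info.name, info.label, info.lines);
	close(fd);
	return 0;
}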
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index b02cc2abd3b6..b78a634cca24 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1489,6 +1489,9 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
type = IRQ_TYPE_NONE;
}
+ if (gc->to_irq)
+ chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__);
+
gc->to_irq = gpiochip_to_irq;
gc->irq.default_type = type;
gc->irq.lock_key = lock_key;
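For illustration only (hypothetical driver, not from this patch): the kind of pre-set gc->to_irq that the new chip_warn() flags, since gpiochip_add_irqchip() replaces it with gpiochip_to_irq() right after the warning.

#include <linux/gpio/driver.h>

/* hypothetical driver-specific mapping that would now trigger the warning */
static int mychip_to_irq(struct gpio_chip *gc, unsigned int offset)
{
	return 100 + offset;	/* assumed fixed hwirq base */
}

static void mychip_init_gpiochip(struct gpio_chip *gc)
{
	/* redundant: gpiochip_add_irqchip() overwrites this with gpiochip_to_irq */
	gc->to_irq = mychip_to_irq;
}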
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 0973f408d75f..8bf103de1594 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -214,10 +214,6 @@ config DRM_GEM_SHMEM_HELPER
help
Choose this if you need the GEM shmem helper functions
-config DRM_VM
- bool
- depends on DRM && MMU
-
config DRM_SCHED
tristate
depends on DRM
@@ -391,7 +387,6 @@ source "drivers/gpu/drm/xlnx/Kconfig"
menuconfig DRM_LEGACY
bool "Enable legacy drivers (DANGEROUS)"
depends on DRM && MMU
- select DRM_VM
help
Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
APIs to user-space, which can be used to circumvent access
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index fefaff4c832d..926adef289db 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -5,7 +5,7 @@
drm-y := drm_auth.o drm_cache.o \
drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_memory.o drm_drv.o \
+ drm_drv.o \
drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
drm_encoder_slave.o \
@@ -20,9 +20,9 @@ drm-y := drm_auth.o drm_cache.o \
drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \
drm_managed.o drm_vblank_work.o
-drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
+drm-$(CONFIG_DRM_LEGACY) += drm_bufs.o drm_context.o drm_dma.o drm_legacy_misc.o drm_lock.o \
+ drm_memory.o drm_scatter.o drm_vm.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
-drm-$(CONFIG_DRM_VM) += drm_vm.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 6bf6cfaea3f1..f8903abcdb88 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -56,7 +56,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
- amdgpu_fw_attestation.o
+ amdgpu_fw_attestation.o amdgpu_securedisplay.o
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
@@ -71,7 +71,7 @@ amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
- nbio_v7_2.o dimgrey_cavefish_reg_init.o
+ nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o
# add DF block
amdgpu-y += \
@@ -97,6 +97,7 @@ amdgpu-y += \
tonga_ih.o \
cz_ih.o \
vega10_ih.o \
+ vega20_ih.o \
navi10_ih.o
# add PSP block
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5993dd0fdd8e..f4ff8ddb52d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -55,7 +55,6 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/amdgpu_drm.h>
@@ -89,6 +88,7 @@
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
#include "amdgpu_nbio.h"
+#include "amdgpu_hdp.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
@@ -107,6 +107,7 @@
#include "amdgpu_gfxhub.h"
#include "amdgpu_df.h"
#include "amdgpu_smuio.h"
+#include "amdgpu_hdp.h"
#define MAX_GPU_INSTANCE 16
@@ -608,7 +609,6 @@ struct amdgpu_asic_funcs {
/* invalidate hdp read cache */
void (*invalidate_hdp)(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
- void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev);
 /* check if the asic needs a full reset or if soft reset will work */
bool (*need_full_reset)(struct amdgpu_device *adev);
/* initialize doorbell layout for specific asic*/
@@ -921,6 +921,9 @@ struct amdgpu_device {
/* nbio */
struct amdgpu_nbio nbio;
+ /* hdp */
+ struct amdgpu_hdp hdp;
+
/* smuio */
struct amdgpu_smuio smuio;
@@ -1202,8 +1205,10 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
-#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
+#define amdgpu_asic_flush_hdp(adev, r) \
+ ((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
+#define amdgpu_asic_invalidate_hdp(adev, r) \
+ ((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : (adev)->hdp.funcs->invalidate_hdp((adev), (r)))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
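A minimal sketch (plain C, not amdgpu code) of the dispatch pattern the reworked flush/invalidate macros above use: prefer a legacy per-ASIC callback when one is installed, otherwise fall back to the new per-IP HDP function table; all names here are illustrative.

#include <stddef.h>
#include <stdio.h>

struct hdp_funcs  { void (*flush)(void *ctx); };
struct asic_funcs { void (*flush_hdp)(void *ctx); };	/* legacy override, may be NULL */

struct dev_ctx {
	const struct asic_funcs *asic;
	struct { const struct hdp_funcs *funcs; } hdp;	/* new HDP IP block */
};

static void hdp_block_flush(void *ctx) { puts("HDP block flush"); }
static const struct hdp_funcs hdp_block = { .flush = hdp_block_flush };

static void flush_hdp(struct dev_ctx *d)
{
	if (d->asic->flush_hdp)
		d->asic->flush_hdp(d);	/* ASIC-specific override wins */
	else
		d->hdp.funcs->flush(d);	/* otherwise use the HDP IP block */
}

int main(void)
{
	struct asic_funcs asic = { .flush_hdp = NULL };
	struct dev_ctx d = { .asic = &asic, .hdp = { .funcs = &hdp_block } };

	flush_hdp(&d);	/* no override, falls through to the HDP block */
	return 0;
}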
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 4763bab7a4d0..62aa1a6f64ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -23,7 +23,6 @@
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
-#include "navi10_enum.h"
#include "athub/athub_2_0_0_offset.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 50016bf9c427..fad3b91f74f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -24,7 +24,6 @@
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_3_0_offset.h"
#include "gc/gc_10_3_0_sh_mask.h"
-#include "navi10_enum.h"
#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"
#include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 2d991da2cead..0849b68e784f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -453,7 +453,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
struct amdgpu_bo *bo = mem->bo;
uint64_t va = mem->va;
struct list_head *list_bo_va = &mem->bo_va_list;
- unsigned long bo_size = bo->tbo.mem.size;
+ unsigned long bo_size = bo->tbo.base.size;
if (!va) {
pr_err("Invalid VA when adding BO to VM\n");
@@ -1281,7 +1281,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
{
struct amdkfd_process_info *process_info = mem->process_info;
- unsigned long bo_size = mem->bo->tbo.mem.size;
+ unsigned long bo_size = mem->bo->tbo.base.size;
struct kfd_bo_va_list *entry, *tmp;
struct bo_vm_reservation_context ctx;
struct ttm_validate_buffer *bo_list_entry;
@@ -1402,7 +1402,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
mutex_lock(&mem->lock);
domain = mem->domain;
- bo_size = bo->tbo.mem.size;
+ bo_size = bo->tbo.base.size;
pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
mem->va,
@@ -1506,7 +1506,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdkfd_process_info *process_info =
((struct amdgpu_vm *)vm)->process_info;
- unsigned long bo_size = mem->bo->tbo.mem.size;
+ unsigned long bo_size = mem->bo->tbo.base.size;
struct kfd_bo_va_list *entry;
struct bo_vm_reservation_context ctx;
int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 306077884a67..6107ac91db25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -112,6 +112,7 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
union igp_info {
struct atom_integrated_system_info_v1_11 v11;
struct atom_integrated_system_info_v1_12 v12;
+ struct atom_integrated_system_info_v2_1 v21;
};
union umc_info {
@@ -209,24 +210,42 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
if (adev->flags & AMD_IS_APU) {
igp_info = (union igp_info *)
(mode_info->atom_context->bios + data_offset);
- switch (crev) {
- case 11:
- mem_channel_number = igp_info->v11.umachannelnumber;
- /* channel width is 64 */
- if (vram_width)
- *vram_width = mem_channel_number * 64;
- mem_type = igp_info->v11.memorytype;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 11:
+ case 12:
+ mem_channel_number = igp_info->v11.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ /* channel width is 64 */
+ if (vram_width)
+ *vram_width = mem_channel_number * 64;
+ mem_type = igp_info->v11.memorytype;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
break;
- case 12:
- mem_channel_number = igp_info->v12.umachannelnumber;
- /* channel width is 64 */
- if (vram_width)
- *vram_width = mem_channel_number * 64;
- mem_type = igp_info->v12.memorytype;
- if (vram_type)
- *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ case 2:
+ switch (crev) {
+ case 1:
+ case 2:
+ mem_channel_number = igp_info->v21.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ /* channel width is 64 */
+ if (vram_width)
+ *vram_width = mem_channel_number * 64;
+ mem_type = igp_info->v21.memorytype;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 6333cada1e09..efdf639f6593 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -155,7 +155,7 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
u8 header[AMD_VBIOS_SIGNATURE_END+1] = {0};
int len;
- if (!adev->asic_funcs->read_bios_from_rom)
+ if (!adev->asic_funcs || !adev->asic_funcs->read_bios_from_rom)
return false;
/* validate VBIOS signature */
@@ -348,7 +348,8 @@ static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return igp_read_bios_from_vram(adev);
else
- return amdgpu_asic_read_disabled_bios(adev);
+ return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ?
+ false : amdgpu_asic_read_disabled_bios(adev);
}
#ifdef CONFIG_ACPI
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a6667a2ca0db..0a25fecf488a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -35,6 +35,7 @@
#include "amdgpu_dm_debugfs.h"
#include "amdgpu_ras.h"
#include "amdgpu_rap.h"
+#include "amdgpu_securedisplay.h"
#include "amdgpu_fw_attestation.h"
/**
@@ -1427,7 +1428,7 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
struct dma_fence *fence;
spin_lock(&sched->job_list_lock);
- list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+ list_for_each_entry(s_job, &sched->pending_list, list) {
fence = sched->ops->run_job(s_job);
dma_fence_put(fence);
}
@@ -1459,10 +1460,10 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
no_preempt:
spin_lock(&sched->job_list_lock);
- list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
/* remove job from ring_mirror_list */
- list_del_init(&s_job->node);
+ list_del_init(&s_job->list);
sched->ops->free_job(s_job);
continue;
}
@@ -1669,6 +1670,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_rap_debugfs_init(adev);
+ amdgpu_securedisplay_debugfs_init(adev);
+
amdgpu_fw_attestation_debugfs_init(adev);
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1cb7d73f7317..00b6ba5740f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -81,7 +81,6 @@ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
@@ -1106,8 +1105,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
*/
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
- u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
- u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
+ int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
struct pci_bus *root;
struct resource *res;
unsigned i;
@@ -1138,6 +1136,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (!res)
return 0;
+ /* Limit the BAR size to what is available */
+ rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
+ rbar_size);
+
/* Disable memory decoding while we change the BAR addresses and size */
pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
pci_write_config_word(adev->pdev, PCI_COMMAND,
@@ -1423,9 +1425,9 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- pci_set_power_state(dev->pdev, PCI_D0);
- amdgpu_device_load_pci_state(dev->pdev);
- r = pci_enable_device(dev->pdev);
+ pci_set_power_state(pdev, PCI_D0);
+ amdgpu_device_load_pci_state(pdev);
+ r = pci_enable_device(pdev);
if (r)
DRM_WARN("pci_enable_device failed (%d)\n", r);
amdgpu_device_resume(dev, true);
@@ -1437,10 +1439,10 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_suspend(dev, true);
- amdgpu_device_cache_pci_state(dev->pdev);
+ amdgpu_device_cache_pci_state(pdev);
/* Shut down the device */
- pci_disable_device(dev->pdev);
- pci_set_power_state(dev->pdev, PCI_D3cold);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1703,8 +1705,7 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
adev->enable_virtual_display = false;
if (amdgpu_virtual_display) {
- struct drm_device *ddev = adev_to_drm(adev);
- const char *pci_address_name = pci_name(ddev->pdev);
+ const char *pci_address_name = pci_name(adev->pdev);
char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
@@ -2548,11 +2549,11 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev);
- amdgpu_amdkfd_device_fini(adev);
-
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+ amdgpu_amdkfd_device_fini(adev);
+
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
@@ -3034,7 +3035,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
#endif
default:
if (amdgpu_dc > 0)
- DRM_INFO("Display Core has been requested via kernel parameter "
+ DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
"but isn't supported by ASIC, ignoring\n");
return false;
}
@@ -3117,7 +3118,10 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
*/
adev->gfx_timeout = msecs_to_jiffies(10000);
adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+ if (amdgpu_sriov_vf(adev))
+ adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
+ msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
+ else if (amdgpu_passthrough(adev))
adev->compute_timeout = msecs_to_jiffies(60000);
else
adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
@@ -3397,7 +3401,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
- pci_enable_pcie_error_reporting(adev->ddev.pdev);
+ pci_enable_pcie_error_reporting(adev->pdev);
/* Post card if necessary */
if (amdgpu_device_need_post(adev)) {
@@ -4155,8 +4159,8 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
continue;
spin_lock(&ring->sched.job_list_lock);
- job = list_first_entry_or_null(&ring->sched.ring_mirror_list,
- struct drm_sched_job, node);
+ job = list_first_entry_or_null(&ring->sched.pending_list,
+ struct drm_sched_job, list);
spin_unlock(&ring->sched.job_list_lock);
if (job)
return true;
@@ -4206,6 +4210,8 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ case CHIP_VANGOGH:
break;
default:
goto disabled;
@@ -4950,8 +4956,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
case pci_channel_io_normal:
return PCI_ERS_RESULT_CAN_RECOVER;
/* Fatal error, prepare for slot reset */
- case pci_channel_io_frozen:
- /*
+ case pci_channel_io_frozen:
+ /*
* Cancel and wait for all TDRs in progress if failing to
* set adev->in_gpu_reset in amdgpu_device_lock_adev
*
@@ -5042,7 +5048,7 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
goto out;
}
- adev->in_pci_err_recovery = true;
+ adev->in_pci_err_recovery = true;
r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
adev->in_pci_err_recovery = false;
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e42175e1acf1..47e0b48dc26f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -40,6 +40,7 @@
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
+#include <linux/pm_runtime.h>
/**
* amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
@@ -151,9 +152,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
if (attach->dev->driver == adev->dev->driver)
return 0;
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0)
+ goto out;
+
r = amdgpu_bo_reserve(bo, false);
if (unlikely(r != 0))
- return r;
+ goto out;
/*
* We only create shared fences for internal use, but importers
@@ -165,11 +170,15 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
*/
r = __dma_resv_make_exclusive(bo->tbo.base.resv);
if (r)
- return r;
+ goto out;
bo->prime_shared_count++;
amdgpu_bo_unreserve(bo);
return 0;
+
+out:
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
}
/**
@@ -189,6 +198,9 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
bo->prime_shared_count--;
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}
/**
@@ -269,7 +281,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
case TTM_PL_TT:
sgt = drm_prime_pages_to_sg(obj->dev,
bo->tbo.ttm->pages,
- bo->tbo.num_pages);
+ bo->tbo.ttm->num_pages);
if (IS_ERR(sgt))
return sgt;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 72efd579ec5e..effa9062f541 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1085,6 +1085,8 @@ static const struct pci_device_id pciidlist[] = {
/* Renoir */
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+ {0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+ {0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
/* Navi12 */
{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
@@ -1204,7 +1206,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- ddev->pdev = pdev;
pci_set_drvdata(pdev, ddev);
ret = amdgpu_driver_load_kms(adev, ent->driver_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 0bf7d36c6686..51cd49c6f38f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -271,7 +271,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
- vga_switcheroo_client_fb_set(adev_to_drm(adev)->pdev, info);
+ vga_switcheroo_client_fb_set(adev->pdev, info);
return 0;
out:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
index 7c6e02e35573..8d1ad294cb02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -47,10 +47,9 @@ typedef struct FW_ATT_RECORD
uint16_t AttFwIdV2; /* V2 FW ID field */
uint32_t AttFWVersion; /* FW Version */
uint16_t AttFWActiveFunctionID; /* The VF ID (only in VF Attestation Table) */
- uint16_t AttSource; /* FW source indicator */
- uint16_t RecordValid; /* Indicates whether the record is a valid entry */
- uint8_t AttFwTaId; /* Ta ID (only in TA Attestation Table) */
- uint8_t Reserved;
+ uint8_t AttSource; /* FW source indicator */
+ uint8_t RecordValid; /* Indicates whether the record is a valid entry */
+ uint32_t AttFwTaId; /* Ta ID (only in TA Attestation Table) */
} FW_ATT_RECORD;
static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d0a1fee1f5f6..a5c42c3004a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -619,7 +619,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
int r = 0;
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
- dev_dbg(&dev->pdev->dev,
+ dev_dbg(dev->dev,
"va_address 0x%LX is in reserved area 0x%LX\n",
args->va_address, AMDGPU_VA_RESERVED_SIZE);
return -EINVAL;
@@ -627,7 +627,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
if (args->va_address >= AMDGPU_GMC_HOLE_START &&
args->va_address < AMDGPU_GMC_HOLE_END) {
- dev_dbg(&dev->pdev->dev,
+ dev_dbg(dev->dev,
"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
args->va_address, AMDGPU_GMC_HOLE_START,
AMDGPU_GMC_HOLE_END);
@@ -639,14 +639,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_SIZE;
if (args->va_address + args->map_size > vm_size) {
- dev_dbg(&dev->pdev->dev,
+ dev_dbg(dev->dev,
"va_address 0x%llx is in top reserved area 0x%llx\n",
args->va_address + args->map_size, vm_size);
return -EINVAL;
}
if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
- dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
+ dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
args->flags);
return -EINVAL;
}
@@ -658,7 +658,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
case AMDGPU_VA_OP_REPLACE:
break;
default:
- dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
+ dev_dbg(dev->dev, "unsupported operation %d\n",
args->operation);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 6e679db5e46f..fe1a39ffda72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -120,7 +120,7 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
- if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
+ if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
return AMDGPU_BO_INVALID_OFFSET;
if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
new file mode 100644
index 000000000000..43caf9f8cc11
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_HDP_H__
+#define __AMDGPU_HDP_H__
+
+struct amdgpu_hdp_funcs {
+ void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+ void (*invalidate_hdp)(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
+ void (*update_clock_gating)(struct amdgpu_device *adev, bool enable);
+ void (*get_clock_gating_state)(struct amdgpu_device *adev, u32 *flags);
+ void (*init_registers)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_hdp {
+ const struct amdgpu_hdp_funcs *funcs;
+};
+
+#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 47cad23a6b9e..bca4dddd5a15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -176,7 +176,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
mutex_init(&i2c->mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index dcd9b4a8e20b..dc852af4f3b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -205,3 +205,48 @@ restart_ih:
return IRQ_HANDLED;
}
+/**
+ * amdgpu_ih_decode_iv_helper - decode an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: ih ring to process
+ * @entry: IV entry
+ *
+ * Decodes the interrupt vector at the current rptr
+ * position and also advances the position for Vega10
+ * and later GPUs.
+ */
+void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ struct amdgpu_iv_entry *entry)
+{
+ /* wptr/rptr are in bytes! */
+ u32 ring_index = ih->rptr >> 2;
+ uint32_t dw[8];
+
+ dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+ dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+ dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
+ dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
+ dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
+ dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
+ dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
+
+ entry->client_id = dw[0] & 0xff;
+ entry->src_id = (dw[0] >> 8) & 0xff;
+ entry->ring_id = (dw[0] >> 16) & 0xff;
+ entry->vmid = (dw[0] >> 24) & 0xf;
+ entry->vmid_src = (dw[0] >> 31);
+ entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
+ entry->timestamp_src = dw[2] >> 31;
+ entry->pasid = dw[3] & 0xffff;
+ entry->pasid_src = dw[3] >> 31;
+ entry->src_data[0] = dw[4];
+ entry->src_data[1] = dw[5];
+ entry->src_data[2] = dw[6];
+ entry->src_data[3] = dw[7];
+
+ /* wptr/rptr are in bytes! */
+ ih->rptr += 32;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 3c9cfe7eecff..6ed4a85fc7c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -30,6 +30,18 @@
struct amdgpu_device;
struct amdgpu_iv_entry;
+struct amdgpu_ih_regs {
+ uint32_t ih_rb_base;
+ uint32_t ih_rb_base_hi;
+ uint32_t ih_rb_cntl;
+ uint32_t ih_rb_wptr;
+ uint32_t ih_rb_rptr;
+ uint32_t ih_doorbell_rptr;
+ uint32_t ih_rb_wptr_addr_lo;
+ uint32_t ih_rb_wptr_addr_hi;
+ uint32_t psp_reg_id;
+};
+
/*
* R6xx+ IH ring
*/
@@ -53,6 +65,7 @@ struct amdgpu_ih_ring {
bool enabled;
unsigned rptr;
atomic_t lock;
+ struct amdgpu_ih_regs ih_regs;
};
/* provided by the ih block */
@@ -75,5 +88,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
unsigned int num_dw);
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
-
+void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index bea57e8e793f..afbbec82a289 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -444,7 +444,8 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
- } else if (adev->irq.virq[src_id]) {
+ } else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
+ adev->irq.virq[src_id]) {
generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
} else if (!adev->irq.client[client_id].sources) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index dcfe8a3b03ff..ff48101bab55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -271,7 +271,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
}
/* Signal all jobs already scheduled to HW */
- list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+ list_for_each_entry(s_job, &sched->pending_list, list) {
struct drm_sched_fence *s_fence = s_job->s_fence;
dma_fence_set_error(&s_fence->finished, -EHWPOISON);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b16b32797624..3c37cf1ae8b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -142,7 +142,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
((flags & AMD_IS_APU) == 0) &&
- !pci_is_thunderbolt_attached(dev->pdev))
+ !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
flags |= AMD_IS_PX;
parent = pci_upstream_bridge(adev->pdev);
@@ -156,7 +156,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
*/
r = amdgpu_device_init(adev, flags);
if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+ dev_err(dev->dev, "Fatal error during GPU init\n");
goto out;
}
@@ -199,7 +199,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
acpi_status = amdgpu_acpi_init(adev);
if (acpi_status)
- dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
+ dev_dbg(dev->dev, "Error during ACPI methods call\n");
if (adev->runpm) {
/* only need to skip on ATPX */
@@ -735,10 +735,10 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (!dev_info)
return -ENOMEM;
- dev_info->device_id = dev->pdev->device;
+ dev_info->device_id = adev->pdev->device;
dev_info->chip_rev = adev->rev_id;
dev_info->external_rev = adev->external_rev_id;
- dev_info->pci_rev = dev->pdev->revision;
+ dev_info->pci_rev = adev->pdev->revision;
dev_info->family = adev->family;
dev_info->num_shader_engines = adev->gfx.config.max_shader_engines;
dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index e62cc0e1a5ad..4ba0024aedf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -57,7 +57,6 @@ struct amdgpu_nbio_funcs {
u32 (*get_pcie_port_data_offset)(struct amdgpu_device *adev);
u32 (*get_rev_id)(struct amdgpu_device *adev);
void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
- void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
u32 (*get_memsize)(struct amdgpu_device *adev);
void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
bool use_doorbell, int doorbell_index, int doorbell_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 25ec4d57333f..6cc9919b12cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -787,7 +787,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
if (r < 0)
return r;
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 79120ec41396..9ac37569823f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -174,12 +174,12 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
- return bo->tbo.num_pages << PAGE_SHIFT;
+ return bo->tbo.base.size;
}
static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
- return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+ return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}
static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 523d22db094b..839917eb7bc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -36,6 +36,7 @@
#include "psp_v12_0.h"
#include "amdgpu_ras.h"
+#include "amdgpu_securedisplay.h"
static int psp_sysfs_init(struct amdgpu_device *adev);
static void psp_sysfs_fini(struct amdgpu_device *adev);
@@ -249,7 +250,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
{
int ret;
int index;
- int timeout = 2000;
+ int timeout = 20000;
bool ras_intr = false;
bool skip_unsupport = false;
@@ -282,7 +283,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
ras_intr = amdgpu_ras_intr_triggered();
if (ras_intr)
break;
- msleep(1);
+ usleep_range(10, 100);
amdgpu_asic_invalidate_hdp(psp->adev, NULL);
}
@@ -563,7 +564,7 @@ static int psp_asd_load(struct psp_context *psp)
* add workaround to bypass it for sriov now.
* TODO: add version check to make it common
*/
- if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
+ if (amdgpu_sriov_vf(psp->adev) || !psp->asd_ucode_size)
return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
@@ -1315,8 +1316,12 @@ static int psp_hdcp_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->hdcp_context.hdcp_initialized)
- return 0;
+ if (!psp->hdcp_context.hdcp_initialized) {
+ if (psp->hdcp_context.hdcp_shared_buf)
+ goto out;
+ else
+ return 0;
+ }
ret = psp_hdcp_unload(psp);
if (ret)
@@ -1324,6 +1329,7 @@ static int psp_hdcp_terminate(struct psp_context *psp)
psp->hdcp_context.hdcp_initialized = false;
+out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
&psp->hdcp_context.hdcp_shared_mc_addr,
@@ -1462,8 +1468,12 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->dtm_context.dtm_initialized)
- return 0;
+ if (!psp->dtm_context.dtm_initialized) {
+ if (psp->dtm_context.dtm_shared_buf)
+ goto out;
+ else
+ return 0;
+ }
ret = psp_dtm_unload(psp);
if (ret)
@@ -1471,6 +1481,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
psp->dtm_context.dtm_initialized = false;
+out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
&psp->dtm_context.dtm_shared_mc_addr,
@@ -1642,6 +1653,175 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
}
// RAP end
+/* securedisplay start */
+static int psp_securedisplay_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+ * physical) for sa ta <-> Driver
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_SECUREDISPLAY_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->securedisplay_context.securedisplay_shared_bo,
+ &psp->securedisplay_context.securedisplay_shared_mc_addr,
+ &psp->securedisplay_context.securedisplay_shared_buf);
+
+ return ret;
+}
+
+static int psp_securedisplay_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_securedisplay_start_addr, psp->ta_securedisplay_ucode_size);
+
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_securedisplay_ucode_size,
+ psp->securedisplay_context.securedisplay_shared_mc_addr,
+ PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ if (ret)
+ goto failed;
+
+ psp->securedisplay_context.securedisplay_initialized = true;
+ psp->securedisplay_context.session_id = cmd->resp.session_id;
+ mutex_init(&psp->securedisplay_context.mutex);
+
+failed:
+ kfree(cmd);
+ return ret;
+}
+
+static int psp_securedisplay_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_securedisplay_initialize(struct psp_context *psp)
+{
+ int ret;
+ struct securedisplay_cmd *securedisplay_cmd;
+
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_securedisplay_ucode_size ||
+ !psp->adev->psp.ta_securedisplay_start_addr) {
+ dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
+ return 0;
+ }
+
+ if (!psp->securedisplay_context.securedisplay_initialized) {
+ ret = psp_securedisplay_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_securedisplay_load(psp);
+ if (ret)
+ return ret;
+
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+ if (ret) {
+ psp_securedisplay_unload(psp);
+
+ amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
+ &psp->securedisplay_context.securedisplay_shared_mc_addr,
+ &psp->securedisplay_context.securedisplay_shared_buf);
+
+ psp->securedisplay_context.securedisplay_initialized = false;
+
+ dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
+ return -EINVAL;
+ }
+
+ if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
+ psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+ dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
+ securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
+ }
+
+ return 0;
+}
+
+static int psp_securedisplay_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO:bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->securedisplay_context.securedisplay_initialized)
+ return 0;
+
+ ret = psp_securedisplay_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->securedisplay_context.securedisplay_initialized = false;
+
+ /* free securedisplay shared memory */
+ amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
+ &psp->securedisplay_context.securedisplay_shared_mc_addr,
+ &psp->securedisplay_context.securedisplay_shared_buf);
+
+ return ret;
+}
+
+int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+
+ if (!psp->securedisplay_context.securedisplay_initialized)
+ return -EINVAL;
+
+ if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
+ ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
+ return -EINVAL;
+
+ mutex_lock(&psp->securedisplay_context.mutex);
+
+ ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.session_id);
+
+ mutex_unlock(&psp->securedisplay_context.mutex);
+
+ return ret;
+}
+/* SECUREDISPLAY end */
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -2116,6 +2296,11 @@ skip_memalloc:
if (ret)
dev_err(psp->adev->dev,
"RAP: Failed to initialize RAP\n");
+
+ ret = psp_securedisplay_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
}
return 0;
@@ -2166,6 +2351,7 @@ static int psp_hw_fini(void *handle)
if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
+ psp_securedisplay_terminate(psp);
psp_rap_terminate(psp);
psp_dtm_terminate(psp);
psp_hdcp_terminate(psp);
@@ -2230,6 +2416,11 @@ static int psp_suspend(void *handle)
DRM_ERROR("Failed to terminate rap ta\n");
return ret;
}
+ ret = psp_securedisplay_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate securedisplay ta\n");
+ return ret;
+ }
}
ret = psp_asd_unload(psp);
@@ -2313,6 +2504,11 @@ static int psp_resume(void *handle)
if (ret)
dev_err(psp->adev->dev,
"RAP: Failed to initialize RAP\n");
+
+ ret = psp_securedisplay_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
}
mutex_unlock(&adev->firmware.mutex);
@@ -2589,11 +2785,10 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
switch (desc->fw_type) {
case TA_FW_TYPE_PSP_ASD:
- psp->asd_fw_version = le32_to_cpu(desc->fw_version);
+ psp->asd_fw_version = le32_to_cpu(desc->fw_version);
psp->asd_feature_version = le32_to_cpu(desc->fw_version);
- psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
+ psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
psp->asd_start_addr = ucode_start_addr;
- psp->asd_fw = psp->ta_fw;
break;
case TA_FW_TYPE_PSP_XGMI:
psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
@@ -2620,6 +2815,11 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
psp->ta_rap_ucode_size = le32_to_cpu(desc->size_bytes);
psp->ta_rap_start_addr = ucode_start_addr;
break;
+ case TA_FW_TYPE_PSP_SECUREDISPLAY:
+ psp->ta_securedisplay_ucode_version = le32_to_cpu(desc->fw_version);
+ psp->ta_securedisplay_ucode_size = le32_to_cpu(desc->size_bytes);
+ psp->ta_securedisplay_start_addr = ucode_start_addr;
+ break;
default:
dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index da250bc1ac57..cb50ba445f8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -30,6 +30,7 @@
#include "ta_xgmi_if.h"
#include "ta_ras_if.h"
#include "ta_rap_if.h"
+#include "ta_secureDisplay_if.h"
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
@@ -40,6 +41,7 @@
#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
#define PSP_DTM_SHARED_MEM_SIZE 0x4000
#define PSP_RAP_SHARED_MEM_SIZE 0x4000
+#define PSP_SECUREDISPLAY_SHARED_MEM_SIZE 0x4000
#define PSP_SHARED_MEM_SIZE 0x4000
#define PSP_FW_NAME_LEN 0x24
@@ -171,6 +173,15 @@ struct psp_rap_context {
struct mutex mutex;
};
+struct psp_securedisplay_context {
+ bool securedisplay_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *securedisplay_shared_bo;
+ uint64_t securedisplay_shared_mc_addr;
+ void *securedisplay_shared_buf;
+ struct mutex mutex;
+};
+
#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
#define GDDR6_MEM_TRAINING_OFFSET 0x8000
@@ -298,12 +309,17 @@ struct psp_context
uint32_t ta_rap_ucode_size;
uint8_t *ta_rap_start_addr;
+ uint32_t ta_securedisplay_ucode_version;
+ uint32_t ta_securedisplay_ucode_size;
+ uint8_t *ta_securedisplay_start_addr;
+
struct psp_asd_context asd_context;
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras;
struct psp_hdcp_context hdcp_context;
struct psp_dtm_context dtm_context;
struct psp_rap_context rap_context;
+ struct psp_securedisplay_context securedisplay_context;
struct mutex mutex;
struct psp_memory_training_context mem_train_ctx;
};
@@ -380,6 +396,7 @@ int psp_ras_trigger_error(struct psp_context *psp,
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rlc_autoload_start(struct psp_context *psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index c136bd449744..1fb2a91ad30a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -846,7 +846,7 @@ static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
dev_warn(adev->dev, "Failed to allow XGMI power down");
- if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
dev_warn(adev->dev, "Failed to allow df cstate");
return ret;
@@ -1518,7 +1518,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
int i = 0;
- int ret = 0;
+ int ret = 0, status;
if (!con || !con->eh_data || !bps || !count)
return -EINVAL;
@@ -1543,12 +1543,12 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
.size = AMDGPU_GPU_PAGE_SIZE,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
- ret = amdgpu_vram_mgr_query_page_status(
+ status = amdgpu_vram_mgr_query_page_status(
ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
data->bps[i].retired_page);
- if (ret == -EBUSY)
+ if (status == -EBUSY)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
- else if (ret == -ENOENT)
+ else if (status == -ENOENT)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 1dd040166c63..19d9aa76cfbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -30,6 +30,7 @@
#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342 0xA0
+#define EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID 0xA0
/*
 * The 2 macros below represent the actual size in bytes that
@@ -62,7 +63,8 @@
static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
{
if ((adev->asic_type == CHIP_VEGA20) ||
- (adev->asic_type == CHIP_ARCTURUS))
+ (adev->asic_type == CHIP_ARCTURUS) ||
+ (adev->asic_type == CHIP_SIENNA_CICHLID))
return true;
return false;
@@ -100,6 +102,10 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
case CHIP_ARCTURUS:
return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
+ case CHIP_SIENNA_CICHLID:
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID;
+ break;
+
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
new file mode 100644
index 000000000000..834440ab9ff7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_securedisplay.h"
+
+/**
+ * DOC: AMDGPU SECUREDISPLAY debugfs test interface
+ *
+ * How to use:
+ * echo opcode <value> > <debugfs_dir>/dri/xxx/securedisplay_test
+ * e.g. echo 1 > <debugfs_dir>/dri/xxx/securedisplay_test
+ * e.g. echo 2 phy_id > <debugfs_dir>/dri/xxx/securedisplay_test
+ *
+ * opcode:
+ * 1: Query whether the TA is responding; used only for validation purposes.
+ * 2: Send Region of Interest and CRC value to I2C. (uint32)phy_id is
+ * sent to determine which DIO scratch register should be used to get
+ * the ROI, and i2c_buf is received as the output.
+ *
+ * See the header file ta_securedisplay_if.h for more detail.
+ *
+ */
+
+void psp_securedisplay_parse_resp_status(struct psp_context *psp,
+ enum ta_securedisplay_status status)
+{
+ switch (status) {
+ case TA_SECUREDISPLAY_STATUS__SUCCESS:
+ break;
+ case TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE:
+ dev_err(psp->adev->dev, "Secure display: Generic Failure.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER:
+ dev_err(psp->adev->dev, "Secure display: Invalid Parameter.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__NULL_POINTER:
+ dev_err(psp->adev->dev, "Secure display: Null Pointer.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR:
+ dev_err(psp->adev->dev, "Secure display: Failed to write to I2C.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR:
+ dev_err(psp->adev->dev, "Secure display: Failed to Read DIO Scratch Register.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR:
+ dev_err(psp->adev->dev, "Secure display: Failed to Read CRC");
+ break;
+ default:
+ dev_err(psp->adev->dev, "Secure display: Failed to parse status: %d\n", status);
+ }
+}
+
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
+ enum ta_securedisplay_command command_id)
+{
+ *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.securedisplay_shared_buf;
+ memset(*cmd, 0, sizeof(struct securedisplay_cmd));
+ (*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE;
+ (*cmd)->cmd_id = command_id;
+}
+
+static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct psp_context *psp = &adev->psp;
+ struct securedisplay_cmd *securedisplay_cmd;
+ struct drm_device *dev = adev_to_drm(adev);
+ uint32_t phy_id;
+ uint32_t op;
+ int i;
+ char str[64];
+ char i2c_output[256];
+ int ret;
+
+ if (*pos || size > sizeof(str) - 1)
+ return -EINVAL;
+
+ memset(str, 0, sizeof(str));
+ ret = copy_from_user(str, buf, size);
+ if (ret)
+ return -EFAULT;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+ }
+
+ if (size < 3)
+ sscanf(str, "%u ", &op);
+ else
+ sscanf(str, "%u %u", &op, &phy_id);
+
+ switch (op) {
+ case 1:
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+ if (!ret) {
+ if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS)
+ dev_info(adev->dev, "SECUREDISPLAY: query securedisplay TA ret is 0x%X\n",
+ securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
+ else
+ psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+ }
+ break;
+ case 2:
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_id;
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ if (!ret) {
+ if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS) {
+ memset(i2c_output, 0, sizeof(i2c_output));
+ for (i = 0; i < TA_SECUREDISPLAY_I2C_BUFFER_SIZE; i++)
+ sprintf(i2c_output + strlen(i2c_output), " 0x%X",
+ securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf[i]);
+ dev_info(adev->dev, "SECUREDISPLAY: I2C buffer output is:%s\n", i2c_output);
+ } else {
+ psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+ }
+ }
+ break;
+ default:
+ dev_err(adev->dev, "Invalid input: %s\n", str);
+ }
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return size;
+}
+
+static const struct file_operations amdgpu_securedisplay_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_securedisplay_debugfs_write,
+ .llseek = default_llseek
+};
+
+void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+
+ if (!adev->psp.securedisplay_context.securedisplay_initialized)
+ return;
+
+ debugfs_create_file("securedisplay_test", S_IWUSR, adev_to_drm(adev)->primary->debugfs_root,
+ adev, &amdgpu_securedisplay_debugfs_ops);
+#endif
+}
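
For reference, the node accepts a short command string: "1" queries the securedisplay TA, "2 <phy_id>" requests the ROI CRC over I2C, so on a typical debugfs mount something like echo "2 0" > /sys/kernel/debug/dri/0/securedisplay_test exercises the second path (the dri index depends on the system). A minimal sketch of the prep/invoke/status pattern the handler follows, and that any further command would reuse (the helper name example_securedisplay_query is made up; everything else is taken from the code above):

/* Sketch only: the same three-step pattern the debugfs handler uses. */
static int example_securedisplay_query(struct psp_context *psp)
{
	struct securedisplay_cmd *cmd;
	int ret;

	/* 1. reset the shared buffer and stamp the command id */
	psp_prep_securedisplay_cmd_buf(psp, &cmd, TA_SECUREDISPLAY_COMMAND__QUERY_TA);

	/* 2. ring the TA */
	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
	if (ret)
		return ret;

	/* 3. interpret the TA's status word */
	if (cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
		psp_securedisplay_parse_resp_status(psp, cmd->status);
		return -EIO;
	}
	return 0;
}
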
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
new file mode 100644
index 000000000000..fe98574748f4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _AMDGPU_SECUREDISPLAY_H
+#define _AMDGPU_SECUREDISPLAY_H
+
+#include "amdgpu.h"
+#include "ta_secureDisplay_if.h"
+
+void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev);
+void psp_securedisplay_parse_resp_status(struct psp_context *psp,
+ enum ta_securedisplay_status status);
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
+ enum ta_securedisplay_command command_id);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 6752d8b13118..792d20261846 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -21,7 +21,7 @@
*
*/
-#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_
#include <linux/stringify.h>
@@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_bo_create,
TP_fast_assign(
__entry->bo = bo;
- __entry->pages = bo->tbo.num_pages;
+ __entry->pages = bo->tbo.mem.num_pages;
__entry->type = bo->tbo.mem.mem_type;
__entry->prefer = bo->preferred_domains;
__entry->allow = bo->allowed_domains;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 4d8f19ab1014..9fd2157b133a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -46,7 +46,6 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
@@ -637,7 +636,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
out:
/* update statistics */
- atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
+ atomic64_add(bo->base.size, &adev->num_bytes_moved);
amdgpu_bo_move_notify(bo, evict, new_mem);
return 0;
}
@@ -918,8 +917,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
goto release_sg;
/* convert SG to linear array of pages and dma addresses */
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address, ttm->num_pages);
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
return 0;
@@ -1265,9 +1264,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
ttm->sg = sgt;
}
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address,
- ttm->num_pages);
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
return 0;
}
@@ -2124,7 +2122,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
return r;
}
- num_pages = bo->tbo.num_pages;
+ num_pages = bo->tbo.mem.num_pages;
mm_node = bo->tbo.mem.mm_node;
num_loops = 0;
while (num_pages) {
@@ -2154,7 +2152,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
}
}
- num_pages = bo->tbo.num_pages;
+ num_pages = bo->tbo.mem.num_pages;
mm_node = bo->tbo.mem.mm_node;
while (num_pages) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 0e43b46d3ab5..46449e70348b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -122,6 +122,9 @@ struct ta_firmware_header_v1_0 {
uint32_t ta_dtm_ucode_version;
uint32_t ta_dtm_offset_bytes;
uint32_t ta_dtm_size_bytes;
+ uint32_t ta_securedisplay_ucode_version;
+ uint32_t ta_securedisplay_offset_bytes;
+ uint32_t ta_securedisplay_size_bytes;
};
enum ta_fw_type {
@@ -132,6 +135,7 @@ enum ta_fw_type {
TA_FW_TYPE_PSP_HDCP,
TA_FW_TYPE_PSP_DTM,
TA_FW_TYPE_PSP_RAP,
+ TA_FW_TYPE_PSP_SECUREDISPLAY,
};
struct ta_fw_bin_desc {
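
The consumer side of these new header fields is not part of this hunk; a hedged sketch of how a TA firmware parser might record the descriptor, mirroring the existing DTM/RAP handling. The psp_context member names below are assumptions for illustration only:

/* Illustration only: recording the new securedisplay TA descriptor. */
static void example_parse_securedisplay_ta(struct amdgpu_device *adev,
					   const struct ta_firmware_header_v1_0 *ta_hdr)
{
	adev->psp.ta_securedisplay_ucode_version =
		le32_to_cpu(ta_hdr->ta_securedisplay_ucode_version);
	adev->psp.ta_securedisplay_ucode_size =
		le32_to_cpu(ta_hdr->ta_securedisplay_size_bytes);
	/* ta_securedisplay_offset_bytes then locates the blob inside the TA image */
}
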
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 8b989670ed66..e2ed4689118a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1170,7 +1170,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
int r, i;
r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_GTT,
&bo, NULL, (void **)&msg);
if (r)
return r;
@@ -1202,7 +1202,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
int r, i;
r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_GTT,
&bo, NULL, (void **)&msg);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0d5284b936e4..ea6a62f67e38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1160,6 +1160,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_bo_free_kernel(&bo, NULL, NULL);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 4a77c7424dfc..99b82f3c2617 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -496,6 +496,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
struct amdgpu_job *job;
struct amdgpu_ib *ib;
uint64_t addr;
+ void *msg = NULL;
int i, r;
r = amdgpu_job_alloc_with_ib(adev, 64,
@@ -505,6 +506,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
ib = &job->ibs[0];
addr = amdgpu_bo_gpu_offset(bo);
+ msg = amdgpu_bo_kptr(bo);
ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
ib->ptr[1] = addr;
ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
@@ -523,7 +525,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
amdgpu_bo_fence(bo, f, false);
amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
if (fence)
*fence = dma_fence_get(f);
@@ -536,7 +538,7 @@ err_free:
err:
amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
return r;
}
@@ -890,6 +892,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_bo_free_kernel(&bo, NULL, NULL);
+
return r;
}
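
The UVD/VCE/VCN hunks above all switch the teardown to the matching helper of amdgpu_bo_create_reserved(). A small sketch of the pairing, assuming the usual helper semantics: create_reserved pins the BO and hands back a CPU mapping, so amdgpu_bo_free_kernel() (which unmaps, unpins and drops the reference) is the correct teardown, whereas a bare amdgpu_bo_unref() would leave the pin and kernel mapping behind. The function name is made up:

static int example_msg_bo(struct amdgpu_device *adev)
{
	struct amdgpu_bo *bo = NULL;
	void *msg = NULL;
	int r;

	r = amdgpu_bo_create_reserved(adev, PAGE_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &bo, NULL, &msg);
	if (r)
		return r;

	/* ... fill msg and submit while the BO is still reserved ... */

	amdgpu_bo_unreserve(bo);
	amdgpu_bo_free_kernel(&bo, NULL, &msg);
	return 0;
}
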
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 2d51b7694d1f..e223fca3ed00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -573,6 +573,7 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
DRM_INFO("clean up the vf2pf work item\n");
flush_delayed_work(&adev->virt.vf2pf_work);
cancel_delayed_work_sync(&adev->virt.vf2pf_work);
+ adev->virt.vf2pf_update_interval_ms = 0;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0768c8686983..ad91c0c3c423 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -653,9 +653,11 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
if (!bo->parent)
continue;
- ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
+ ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
+ &vm->lru_bulk_move);
if (bo->shadow)
ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
+ &bo->shadow->tbo.mem,
&vm->lru_bulk_move);
}
spin_unlock(&ttm_bo_glob.lru_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
index 921a69abda55..5b90efd6f6d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -27,7 +27,6 @@
#include "athub/athub_2_0_0_offset.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_default.h"
-#include "navi10_enum.h"
#include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
index 66c183ddd43e..7b1b18350bf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
@@ -26,7 +26,6 @@
#include "athub/athub_2_1_0_offset.h"
#include "athub/athub_2_1_0_sh_mask.h"
-#include "navi10_enum.h"
#include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index da37f8a900af..307c01301c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -194,19 +194,30 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
- if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
- /* When a ring buffer overflow happen start parsing interrupt
- * from the last not overwritten vector (wptr + 16). Hopefully
- * this should allow us to catchup.
- */
- dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
- ih->rptr = (wptr + 16) & ih->ptr_mask;
- tmp = RREG32(mmIH_RB_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(mmIH_RB_CNTL, tmp);
- }
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32(mmIH_RB_WPTR);
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last vector that was not overwritten (wptr + 16). Hopefully
+ * this should allow us to catch up.
+ */
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
+ tmp = RREG32(mmIH_RB_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
+
+out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index ffcc64ec6473..9810af712cc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -294,7 +294,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
static const struct mode_size {
int w;
int h;
- } common_modes[21] = {
+ } common_modes[] = {
{ 640, 480},
{ 720, 480},
{ 800, 600},
@@ -312,13 +312,14 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
{1600, 1200},
{1920, 1080},
{1920, 1200},
+ {2560, 1440},
{4096, 3112},
{3656, 2664},
{3840, 2160},
{4096, 2160},
};
- for (i = 0; i < 21; i++) {
+ for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
drm_mode_probed_add(connector, mode);
}
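
The loop-bound change above is the standard kernel idiom: let ARRAY_SIZE() follow the initializer instead of hard-coding the count. A standalone illustration (the helper name and the pr_info() output are made up for the sketch):

#include <linux/kernel.h>	/* ARRAY_SIZE() */

static const struct example_mode { int w, h; } example_modes[] = {
	{ 1920, 1080 }, { 2560, 1440 }, { 3840, 2160 },
};

static void example_walk_modes(void)
{
	int i;

	/* ARRAY_SIZE() tracks the initializer, so adding an entry such as
	 * 2560x1440 cannot leave a stale hard-coded bound behind. */
	for (i = 0; i < ARRAY_SIZE(example_modes); i++)
		pr_info("mode %dx%d\n", example_modes[i].w, example_modes[i].h);
}
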
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index ba1086784525..1d604fcb9b77 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -38,7 +38,6 @@
#include "smuio/smuio_11_0_0_offset.h"
#include "smuio/smuio_11_0_0_sh_mask.h"
#include "navi10_enum.h"
-#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
#include "soc15.h"
@@ -99,6 +98,10 @@
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0
+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025
+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh_BASE_IDX 1
#define mmSPI_CONFIG_CNTL_1_Vangogh 0x2441
#define mmSPI_CONFIG_CNTL_1_Vangogh_BASE_IDX 1
#define mmVGT_TF_MEMORY_BASE_HI_Vangogh 0x2261
@@ -115,6 +118,8 @@
#define mmVGT_ESGS_RING_SIZE_Vangogh_BASE_IDX 1
#define mmSPI_CONFIG_CNTL_Vangogh 0x2440
#define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX 1
+#define mmGCR_GENERAL_CNTL_Vangogh 0x1580
+#define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX 0
#define mmCP_HYP_PFP_UCODE_ADDR 0x5814
#define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1
@@ -160,6 +165,9 @@
#define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid 0x15db
#define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid_BASE_IDX 0
+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid 0x2030
+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX 0
+
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -3237,7 +3245,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000142),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -3324,6 +3332,7 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev);
static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev);
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -5691,7 +5700,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -5769,7 +5778,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -5846,7 +5855,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -6215,7 +6224,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -7192,6 +7201,9 @@ static int gfx_v10_0_hw_init(void *handle)
if (adev->asic_type == CHIP_SIENNA_CICHLID)
gfx_v10_3_program_pbb_mode(adev);
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ gfx_v10_3_set_power_brake_sequence(adev);
+
return r;
}
@@ -7377,8 +7389,16 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
- clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
- ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+ switch (adev->asic_type) {
+ case CHIP_VANGOGH:
+ clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
+ ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
+ break;
+ default:
+ clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
+ ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+ break;
+ }
mutex_unlock(&adev->gfx.gpu_clock_mutex);
amdgpu_gfx_off_ctrl(adev, true);
return clock;
@@ -9169,6 +9189,31 @@ static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev)
}
}
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX,
+ (0x1 << GRBM_GFX_INDEX__SA_BROADCAST_WRITES__SHIFT) |
+ (0x1 << GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT) |
+ (0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+
+ WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL);
+ WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA,
+ (0x1 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT) |
+ (0x12 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT) |
+ (0x13 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT) |
+ (0xf << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS__SHIFT));
+
+ WREG32_SOC15(GC, 0, mmGC_THROTTLE_CTRL_Sienna_Cichlid,
+ (0x1 << GC_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT) |
+ (0x1 << GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT) |
+ (0x5 << GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL__SHIFT));
+
+ WREG32_SOC15(GC, 0, mmDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL);
+
+ WREG32_SOC15(GC, 0, mmDIDT_IND_DATA,
+ (0x1 << DIDT_SQ_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT));
+}
+
const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GFX,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 5f4805e4d04a..a896e3d0fcf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -38,7 +38,6 @@
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
-#include "hdp/hdp_4_0_offset.h"
#include "soc15_common.h"
#include "clearstate_gfx9.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 5648c48be77f..3b7c6c31fce1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -27,8 +27,6 @@
#include "gmc_v10_0.h"
#include "umc_v8_7.h"
-#include "hdp/hdp_5_0_0_offset.h"
-#include "hdp/hdp_5_0_0_sh_mask.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
@@ -312,7 +310,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
int r;
/* flush hdp cache */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
/* For SRIOV run time, driver shouldn't access the register through MMIO
* Directly use kiq to do the vm invalidation instead
@@ -995,7 +993,6 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
int r;
bool value;
- u32 tmp;
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -1014,15 +1011,10 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
- tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
- WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
-
- tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
- WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
+ adev->hdp.funcs->init_registers(adev);
/* Flush HDP after it is initialized */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index e22268f9dba7..aedef9017c4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -31,8 +31,6 @@
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"
-#include "hdp/hdp_4_0_offset.h"
-#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
@@ -283,20 +281,6 @@ static const char *mmhub_client_ids_arcturus[][2] = {
[224+15][1] = "SDMA7",
};
-static const u32 golden_settings_vega10_hdp[] =
-{
- 0xf64, 0x0fffffff, 0x00000000,
- 0xf65, 0x0fffffff, 0x00000000,
- 0xf66, 0x0fffffff, 0x00000000,
- 0xf67, 0x0fffffff, 0x00000000,
- 0xf68, 0x0fffffff, 0x00000000,
- 0xf6a, 0x0fffffff, 0x00000000,
- 0xf6b, 0x0fffffff, 0x00000000,
- 0xf6c, 0x0fffffff, 0x00000000,
- 0xf6d, 0x0fffffff, 0x00000000,
- 0xf6e, 0x0fffffff, 0x00000000,
-};
-
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
@@ -1571,7 +1555,6 @@ static int gmc_v9_0_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool value;
int r, i;
- u32 tmp;
/* The sequence of these two function calls matters.*/
gmc_v9_0_init_golden_registers(adev);
@@ -1583,31 +1566,13 @@ static int gmc_v9_0_hw_init(void *handle)
WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
}
- amdgpu_device_program_register_sequence(adev,
- golden_settings_vega10_hdp,
- ARRAY_SIZE(golden_settings_vega10_hdp));
-
if (adev->mmhub.funcs->update_power_gating)
adev->mmhub.funcs->update_power_gating(adev, true);
- switch (adev->asic_type) {
- case CHIP_ARCTURUS:
- WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
- break;
- default:
- break;
- }
-
- WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
-
- tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
- WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
-
- WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
- WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
+ adev->hdp.funcs->init_registers(adev);
/* After HDP is initialized, flush HDP.*/
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
new file mode 100644
index 000000000000..e46621fed5b9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "hdp_v4_0.h"
+#include "amdgpu_ras.h"
+
+#include "hdp/hdp_4_0_offset.h"
+#include "hdp/hdp_4_0_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+/* for Vega20 register name change */
+#define mmHDP_MEM_POWER_CTRL 0x00d4
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
+#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
+
+static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg)
+ WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ else
+ amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+}
+
+static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg)
+ WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+ else
+ amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+ HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
+}
+
+static void hdp_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
+ return;
+ /* read back the HDP RAS counter to reset it to 0 */
+ RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
+}
+
+static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_VEGA12 ||
+ adev->asic_type == CHIP_RAVEN) {
+ def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+ data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+ else
+ data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+
+ if (def != data)
+ WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+ } else {
+ def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+ data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
+ else
+ data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
+
+ if (def != data)
+ WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
+ }
+}
+
+static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
+{
+ int data;
+
+ /* AMD_CG_SUPPORT_HDP_LS */
+ data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+ if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_LS;
+}
+
+static void hdp_v4_0_init_registers(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
+ break;
+ default:
+ break;
+ }
+
+ WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
+
+ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
+ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
+}
+
+const struct amdgpu_hdp_funcs hdp_v4_0_funcs = {
+ .flush_hdp = hdp_v4_0_flush_hdp,
+ .invalidate_hdp = hdp_v4_0_invalidate_hdp,
+ .reset_ras_error_count = hdp_v4_0_reset_ras_error_count,
+ .update_clock_gating = hdp_v4_0_update_clock_gating,
+ .get_clock_gating_state = hdp_v4_0_get_clockgating_state,
+ .init_registers = hdp_v4_0_init_registers,
+};
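
The call-site conversions earlier in this patch (adev->nbio.funcs->hdp_flush becoming adev->hdp.funcs->flush_hdp) rely on each SoC pointing adev->hdp.funcs at the right table. That assignment is outside this excerpt, so the sketch below is an assumption about the wiring, using only the hooks defined above:

/* Assumed wiring, done once from the SoC init code (not shown in this patch). */
static void example_hdp_setup(struct amdgpu_device *adev)
{
	adev->hdp.funcs = &hdp_v4_0_funcs;	/* hdp_v5_0_funcs on Navi-class parts */

	/* generic code can then stay ASIC-agnostic: */
	adev->hdp.funcs->init_registers(adev);
	adev->hdp.funcs->flush_hdp(adev, NULL);
}
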
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h
new file mode 100644
index 000000000000..d1e6399e8c46
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HDP_V4_0_H__
+#define __HDP_V4_0_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_hdp_funcs hdp_v4_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
new file mode 100644
index 000000000000..7a15e669b68d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "hdp_v5_0.h"
+
+#include "hdp/hdp_5_0_0_offset.h"
+#include "hdp/hdp_5_0_0_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg)
+ WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ else
+ amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+}
+
+static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+ } else {
+ amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+ HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
+ }
+}
+
+static void hdp_v5_0_update_mem_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl, hdp_clk_cntl1;
+ uint32_t hdp_mem_pwr_cntl;
+
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)))
+ return;
+
+ hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
+ hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
+
+ /* Before doing the clock/power mode switch,
+ * force on the IPH & RC clocks */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ IPH_MEM_CLK_SOFT_OVERRIDE, 1);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 1);
+ WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
+
+ /* HDP 5.0 doesn't support a dynamic power mode switch,
+ * so disable clock and power gating before making any change */
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_SD_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 0);
+ WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+ /* only one clock gating mode (LS/DS/SD) can be enabled */
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_LS_EN, enable);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, enable);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_DS_EN, enable);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, enable);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_SD_EN, enable);
+ /* RC should not use shut down mode, fallback to ds */
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, enable);
+ }
+
+ /* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
+ * be set for SRAM LS/DS/SD */
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_CTRL_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 1);
+ }
+
+ WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+ /* restore IPH & RC clock override after clock/power mode changing */
+ WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
+}
+
+static void hdp_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl;
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
+ return;
+
+ hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
+
+ if (enable) {
+ hdp_clk_cntl &=
+ ~(uint32_t)
+ (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
+ } else {
+ hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
+ }
+
+ WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_0_update_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ hdp_v5_0_update_mem_power_gating(adev, enable);
+ hdp_v5_0_update_medium_grain_clock_gating(adev, enable);
+}
+
+static void hdp_v5_0_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
+{
+ uint32_t tmp;
+
+ /* AMD_CG_SUPPORT_HDP_MGCG */
+ tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
+ if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
+ *flags |= AMD_CG_SUPPORT_HDP_MGCG;
+
+ /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
+ tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
+ if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_LS;
+ else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_DS;
+ else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_SD;
+}
+
+static void hdp_v5_0_init_registers(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
+ tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
+ WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
+}
+
+const struct amdgpu_hdp_funcs hdp_v5_0_funcs = {
+ .flush_hdp = hdp_v5_0_flush_hdp,
+ .invalidate_hdp = hdp_v5_0_invalidate_hdp,
+ .update_clock_gating = hdp_v5_0_update_clock_gating,
+ .get_clock_gating_state = hdp_v5_0_get_clockgating_state,
+ .init_registers = hdp_v5_0_init_registers,
+};
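
Most register updates in this file share the same guarded read-modify-write shape. A condensed sketch using only the macros, register and field names that appear above (the function name is made up):

static void example_hdp_rmw(struct amdgpu_device *adev, bool enable)
{
	u32 def, data;

	def = data = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	data = REG_SET_FIELD(data, HDP_CLK_CNTL,
			     RC_MEM_CLK_SOFT_OVERRIDE, enable ? 0 : 1);
	if (def != data)	/* skip the MMIO write when nothing changed */
		WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, data);
}
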
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h
new file mode 100644
index 000000000000..2d5ec2b419f3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HDP_V5_0_H__
+#define __HDP_V5_0_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_hdp_funcs hdp_v5_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 37d8b6ca4dab..cc957471f31e 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -194,19 +194,29 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
- if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
- /* When a ring buffer overflow happen start parsing interrupt
- * from the last not overwritten vector (wptr + 16). Hopefully
- * this should allow us to catchup.
- */
- dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
- ih->rptr = (wptr + 16) & ih->ptr_mask;
- tmp = RREG32(mmIH_RB_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(mmIH_RB_CNTL, tmp);
- }
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32(mmIH_RB_WPTR);
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last vector that was not overwritten (wptr + 16). Hopefully
+ * this should allow us to catch up.
+ */
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
+ tmp = RREG32(mmIH_RB_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
+
+out:
return (wptr & ih->ptr_mask);
}
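
This hunk and the cz_ih one earlier implement the same idea: the overflow bit seen in the write-back copy of the wptr may already have been serviced, so it is only acted on if a fresh MMIO read of IH_RB_WPTR still shows it. A condensed sketch of the resulting flow, using the register and field names from above and omitting the warning message:

static u32 example_ih_get_wptr(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);	/* cached write-back copy */
	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	wptr = RREG32(mmIH_RB_WPTR);		/* re-read: was it already cleared? */
	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	/* genuine overflow: rewind rptr past the overwritten slots and ack it */
	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
	ih->rptr = (wptr + 16) & ih->ptr_mask;
	tmp = RREG32(mmIH_RB_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	WREG32(mmIH_RB_CNTL, tmp);
out:
	return wptr & ih->ptr_mask;
}
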
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 985e454463e1..7f30629f21a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -554,7 +554,7 @@ static int mes_v10_1_allocate_eop_buf(struct amdgpu_device *adev)
return r;
}
- memset(eop, 0, adev->mes.eop_gpu_obj->tbo.mem.size);
+ memset(eop, 0, adev->mes.eop_gpu_obj->tbo.base.size);
amdgpu_bo_kunmap(adev->mes.eop_gpu_obj);
amdgpu_bo_unreserve(adev->mes.eop_gpu_obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index b72c8e4ca36b..1961745e89c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -310,7 +310,7 @@ static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -491,12 +491,11 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
{
uint32_t def, data, def1, data1;
- def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
+ def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
- data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+ data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
@@ -505,8 +504,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
} else {
- data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+ data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
@@ -516,7 +514,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
}
if (def != data)
- WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+ WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
if (def1 != data1)
WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
}
@@ -525,17 +523,44 @@ static void
mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
- uint32_t def, data;
-
- def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
- data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
- else
- data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+ uint32_t def, data, def1, data1, def2, data2;
+
+ def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+ def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+ def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
+ data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+ data1 &= !(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ data2 &= !(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ } else {
+ data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+ data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+ }
if (def != data)
- WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+ WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
+ if (def1 != data1)
+ WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1);
+ if (def2 != data2)
+ WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2);
}
static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
@@ -554,26 +579,39 @@ static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
- int data, data1;
+ int data, data1, data2, data3;
if (amdgpu_sriov_vf(adev))
*flags = 0;
- data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
- data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+ data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+ data1 = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+ data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+ data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
/* AMD_CG_SUPPORT_MC_MGCG */
- if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) &&
- !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
- DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
- *flags |= AMD_CG_SUPPORT_MC_MGCG;
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))
+ && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) {
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
+ }
/* AMD_CG_SUPPORT_MC_LS */
- if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+ if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK)
+ && !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))
+ && !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+ DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)))
*flags |= AMD_CG_SUPPORT_MC_LS;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 7767ccca526b..3ee481557fc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -255,6 +255,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
if (!down_read_trylock(&adev->reset_sem))
return;
+ amdgpu_virt_fini_data_exchange(adev);
atomic_set(&adev->in_gpu_reset, 1);
do {
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index dd5c1e6ce009..48e588d3c409 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -276,6 +276,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
if (!down_read_trylock(&adev->reset_sem))
return;
+ amdgpu_virt_fini_data_exchange(adev);
atomic_set(&adev->in_gpu_reset, 1);
do {
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 7ba229e43799..f4e4040bbd25 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -40,6 +40,53 @@
static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
/**
+ * navi10_ih_init_register_offset - Initialize register offset for ih rings
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the register offsets for the ih rings (NAVI10).
+ */
+static void navi10_ih_init_register_offset(struct amdgpu_device *adev)
+{
+ struct amdgpu_ih_regs *ih_regs;
+
+ if (adev->irq.ih.ring_size) {
+ ih_regs = &adev->irq.ih.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
+ ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
+ ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
+ }
+
+ if (adev->irq.ih1.ring_size) {
+ ih_regs = &adev->irq.ih1.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
+ }
+
+ if (adev->irq.ih2.ring_size) {
+ ih_regs = &adev->irq.ih2.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
+ }
+}
+
+/**
* force_update_wptr_for_self_int - Force update the wptr for self interrupt
*
* @adev: amdgpu_device pointer
@@ -82,133 +129,66 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev,
}
/**
- * navi10_ih_enable_interrupts - Enable the interrupt ring buffer
+ * navi10_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
*
* @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
*
- * Enable the interrupt ring buffer (NAVI10).
+ * Toggle the interrupt ring buffer (NAVI10)
*/
-static void navi10_ih_enable_interrupts(struct amdgpu_device *adev)
+static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ bool enable)
{
- u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
- }
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
- adev->irq.ih.enabled = true;
+ ih_regs = &ih->ih_regs;
- if (adev->irq.ih1.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
- RB_ENABLE, 1);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
- }
- adev->irq.ih1.enabled = true;
- }
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ /* enable_intr field is only valid in ring0 */
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
+ WREG32(ih_regs->ih_rb_cntl, tmp);
- if (adev->irq.ih2.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
- RB_ENABLE, 1);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
- }
- adev->irq.ih2.enabled = true;
+ if (enable) {
+ ih->enabled = true;
+ } else {
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_rptr, 0);
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ ih->enabled = false;
+ ih->rptr = 0;
}
- if (adev->irq.ih_soft.ring_size)
- adev->irq.ih_soft.enabled = true;
+ return 0;
}
/**
- * navi10_ih_disable_interrupts - Disable the interrupt ring buffer
+ * navi10_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
*
* @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
*
- * Disable the interrupt ring buffer (NAVI10).
+ * Toggle all the available interrupt ring buffers (NAVI10).
*/
-static void navi10_ih_disable_interrupts(struct amdgpu_device *adev)
+static int navi10_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
{
- u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
- }
-
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
- adev->irq.ih.enabled = false;
- adev->irq.ih.rptr = 0;
-
- if (adev->irq.ih1.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
- RB_ENABLE, 0);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
- }
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
- adev->irq.ih1.enabled = false;
- adev->irq.ih1.rptr = 0;
- }
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ int i;
+ int r;
- if (adev->irq.ih2.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
- RB_ENABLE, 0);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ r = navi10_ih_toggle_ring_interrupts(adev, ih[i], enable);
+ if (r)
+ return r;
}
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
- adev->irq.ih2.enabled = false;
- adev->irq.ih2.rptr = 0;
}
+ return 0;
}
static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
@@ -253,22 +233,49 @@ static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
return ih_doorbell_rtpr;
}
-static void navi10_ih_reroute_ih(struct amdgpu_device *adev)
+/**
+ * navi10_ih_enable_ring - enable an ih ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Enable an ih ring buffer (NAVI10)
+ */
+static int navi10_ih_enable_ring(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
+ struct amdgpu_ih_regs *ih_regs;
uint32_t tmp;
- /* Reroute to IH ring 1 for VMC */
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
-
- /* Reroute IH ring 1 for UMC */
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+ ih_regs = &ih->ih_regs;
+
+ /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
+ WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+ WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = navi10_ih_rb_cntl(ih, tmp);
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+ if (ih == &adev->irq.ih1) {
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+ }
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+
+ if (ih == &adev->irq.ih) {
+ /* set the ih ring 0 writeback address whether it's enabled or not */
+ WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+ WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+ }
+
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ WREG32(ih_regs->ih_rb_rptr, 0);
+
+ WREG32(ih_regs->ih_doorbell_rptr, navi10_ih_doorbell_rptr(ih));
+
+ return 0;
}
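navi10_ih_enable_ring() splits the ring buffer base across two registers: bits [39:8] of the GPU address go into IH_RB_BASE and the byte above that into IH_RB_BASE_HI. A small sketch of that split with a made-up, 256-byte-aligned address (illustrative values only, mirroring the shifts used above):

/* Illustrative only */
uint64_t gpu_addr   = 0x0000012012345600ULL;              /* example address */
uint32_t rb_base    = (uint32_t)(gpu_addr >> 8);          /* bits [39:8]  -> 0x20123456 */
uint32_t rb_base_hi = (uint32_t)(gpu_addr >> 40) & 0xff;  /* bits [47:40] -> 0x01 */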
/**
@@ -284,36 +291,21 @@ static void navi10_ih_reroute_ih(struct amdgpu_device *adev)
*/
static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
- struct amdgpu_ih_ring *ih = &adev->irq.ih;
- u32 ih_rb_cntl, ih_chicken;
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ u32 ih_chicken;
u32 tmp;
+ int ret;
+ int i;
/* disable irqs */
- navi10_ih_disable_interrupts(adev);
+ ret = navi10_ih_toggle_interrupts(adev, false);
+ if (ret)
+ return ret;
adev->nbio.funcs->ih_control(adev);
- /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
- ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
- !!adev->irq.msi_enabled);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
- }
- if (adev->irq.ih1.ring_size)
- navi10_ih_reroute_ih(adev);
-
if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
- if (ih->use_bus_addr) {
+ if (ih[0]->use_bus_addr) {
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
@@ -334,77 +326,17 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
}
}
- /* set the writeback address whether it's enabled or not */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
- lower_32_bits(ih->wptr_addr));
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
- upper_32_bits(ih->wptr_addr) & 0xFFFF);
-
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
- navi10_ih_doorbell_rptr(ih));
-
- adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
- ih->doorbell_index);
-
- ih = &adev->irq.ih1;
- if (ih->ring_size) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
- (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
- WPTR_OVERFLOW_ENABLE, 0);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
- RB_FULL_DRAIN_ENABLE, 1);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ ret = navi10_ih_enable_ring(adev, ih[i]);
+ if (ret)
+ return ret;
}
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
- navi10_ih_doorbell_rptr(ih));
- }
-
- ih = &adev->irq.ih2;
- if (ih->ring_size) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
- (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
-
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
- }
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
- navi10_ih_doorbell_rptr(ih));
}
+ /* update doorbell range for ih ring 0 */
+ adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
+ ih[0]->doorbell_index);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
@@ -418,10 +350,15 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
pci_set_master(adev->pdev);
/* enable interrupts */
- navi10_ih_enable_interrupts(adev);
+ ret = navi10_ih_toggle_interrupts(adev, true);
+ if (ret)
+ return ret;
/* enable wptr force update for self int */
force_update_wptr_for_self_int(adev, 0, 8, true);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -435,7 +372,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
static void navi10_ih_irq_disable(struct amdgpu_device *adev)
{
force_update_wptr_for_self_int(adev, 0, 8, false);
- navi10_ih_disable_interrupts(adev);
+ navi10_ih_toggle_interrupts(adev, false);
/* Wait and acknowledge irq */
mdelay(1);
@@ -455,23 +392,16 @@ static void navi10_ih_irq_disable(struct amdgpu_device *adev)
static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
- u32 wptr, reg, tmp;
+ u32 wptr, tmp;
+ struct amdgpu_ih_regs *ih_regs;
wptr = le32_to_cpu(*ih->wptr_cpu);
+ ih_regs = &ih->ih_regs;
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- if (ih == &adev->irq.ih)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
- else if (ih == &adev->irq.ih1)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
- else if (ih == &adev->irq.ih2)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
- else
- BUG();
-
- wptr = RREG32_NO_KIQ(reg);
+ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
@@ -486,68 +416,14 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
wptr, ih->rptr, tmp);
ih->rptr = tmp;
- if (ih == &adev->irq.ih)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
- else if (ih == &adev->irq.ih1)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- else if (ih == &adev->irq.ih2)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- else
- BUG();
-
- tmp = RREG32_NO_KIQ(reg);
+ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32_NO_KIQ(reg, tmp);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
/**
- * navi10_ih_decode_iv - decode an interrupt vector
- *
- * @adev: amdgpu_device pointer
- * @ih: IH ring buffer to decode
- * @entry: IV entry to place decoded information into
- *
- * Decodes the interrupt vector at the current rptr
- * position and also advance the position.
- */
-static void navi10_ih_decode_iv(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih,
- struct amdgpu_iv_entry *entry)
-{
- /* wptr/rptr are in bytes! */
- u32 ring_index = ih->rptr >> 2;
- uint32_t dw[8];
-
- dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
- dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
- dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
- dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
- dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
- dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
- dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
- dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
-
- entry->client_id = dw[0] & 0xff;
- entry->src_id = (dw[0] >> 8) & 0xff;
- entry->ring_id = (dw[0] >> 16) & 0xff;
- entry->vmid = (dw[0] >> 24) & 0xf;
- entry->vmid_src = (dw[0] >> 31);
- entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
- entry->timestamp_src = dw[2] >> 31;
- entry->pasid = dw[3] & 0xffff;
- entry->pasid_src = dw[3] >> 31;
- entry->src_data[0] = dw[4];
- entry->src_data[1] = dw[5];
- entry->src_data[2] = dw[6];
- entry->src_data[3] = dw[7];
-
- /* wptr/rptr are in bytes! */
- ih->rptr += 32;
-}
-
-/**
* navi10_ih_irq_rearm - rearm IRQ if lost
*
* @adev: amdgpu_device pointer
@@ -557,22 +433,15 @@ static void navi10_ih_decode_iv(struct amdgpu_device *adev,
static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
- uint32_t reg_rptr = 0;
uint32_t v = 0;
uint32_t i = 0;
+ struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
- else if (ih == &adev->irq.ih1)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
- else if (ih == &adev->irq.ih2)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
- else
- return;
+ ih_regs = &ih->ih_regs;
/* Rearm IRQ / re-write doorbell if doorbell write is lost */
for (i = 0; i < MAX_REARM_RETRY; i++) {
- v = RREG32_NO_KIQ(reg_rptr);
+ v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
if ((v < ih->ring_size) && (v != ih->rptr))
WDOORBELL32(ih->doorbell_index, ih->rptr);
else
@@ -591,6 +460,8 @@ static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
static void navi10_ih_set_rptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
+ struct amdgpu_ih_regs *ih_regs;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
@@ -598,12 +469,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
navi10_ih_irq_rearm(adev, ih);
- } else if (ih == &adev->irq.ih) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
- } else if (ih == &adev->irq.ih1) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
- } else if (ih == &adev->irq.ih2) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
+ } else {
+ ih_regs = &ih->ih_regs;
+ WREG32(ih_regs->ih_rb_rptr, ih->rptr);
}
}
@@ -685,23 +553,8 @@ static int navi10_ih_sw_init(void *handle)
adev->irq.ih1.ring_size = 0;
adev->irq.ih2.ring_size = 0;
- if (adev->asic_type < CHIP_NAVI10) {
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
- if (r)
- return r;
-
- adev->irq.ih1.use_doorbell = true;
- adev->irq.ih1.doorbell_index =
- (adev->doorbell_index.ih + 1) << 1;
-
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
- if (r)
- return r;
-
- adev->irq.ih2.use_doorbell = true;
- adev->irq.ih2.doorbell_index =
- (adev->doorbell_index.ih + 2) << 1;
- }
+ /* initialize ih control registers offset */
+ navi10_ih_init_register_offset(adev);
r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
if (r)
@@ -717,6 +570,7 @@ static int navi10_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
amdgpu_ih_ring_fini(adev, &adev->irq.ih);
@@ -848,7 +702,7 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs navi10_ih_funcs = {
.get_wptr = navi10_ih_get_wptr,
- .decode_iv = navi10_ih_decode_iv,
+ .decode_iv = amdgpu_ih_decode_iv_helper,
.set_rptr = navi10_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index b5c3db16c2b0..b860f1c7b5b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -80,15 +80,6 @@ static void nbio_v2_3_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
@@ -366,7 +357,6 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.get_pcie_data_offset = nbio_v2_3_get_pcie_data_offset,
.get_rev_id = nbio_v2_3_get_rev_id,
.mc_access_enable = nbio_v2_3_mc_access_enable,
- .hdp_flush = nbio_v2_3_hdp_flush,
.get_memsize = nbio_v2_3_get_memsize,
.sdma_doorbell_range = nbio_v2_3_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v2_3_vcn_doorbell_range,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index d2f1fe55d388..83ea063388fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -29,6 +29,15 @@
#include "nbio/nbio_6_1_sh_mask.h"
#include "nbio/nbio_6_1_smn.h"
#include "vega10_enum.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+static void nbio_v6_1_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
@@ -50,18 +59,6 @@ static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_SOC15_NO_KIQ(NBIO, 0,
- mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL,
- 0);
- else
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
-}
-
static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
@@ -266,7 +263,6 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
.get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset,
.get_rev_id = nbio_v6_1_get_rev_id,
.mc_access_enable = nbio_v6_1_mc_access_enable,
- .hdp_flush = nbio_v6_1_hdp_flush,
.get_memsize = nbio_v6_1_get_memsize,
.sdma_doorbell_range = nbio_v6_1_sdma_doorbell_range,
.enable_doorbell_aperture = nbio_v6_1_enable_doorbell_aperture,
@@ -277,4 +273,5 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
.get_clockgating_state = nbio_v6_1_get_clockgating_state,
.ih_control = nbio_v6_1_ih_control,
.init_registers = nbio_v6_1_init_registers,
+ .remap_hdp_registers = nbio_v6_1_remap_hdp_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index ae685813c419..3c00666a13e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -60,15 +60,6 @@ static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
@@ -292,7 +283,6 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
.get_pcie_data_offset = nbio_v7_0_get_pcie_data_offset,
.get_rev_id = nbio_v7_0_get_rev_id,
.mc_access_enable = nbio_v7_0_mc_access_enable,
- .hdp_flush = nbio_v7_0_hdp_flush,
.get_memsize = nbio_v7_0_get_memsize,
.sdma_doorbell_range = nbio_v7_0_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v7_0_vcn_doorbell_range,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
index aa36022670f9..598ce0e93627 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
@@ -56,15 +56,6 @@ static void nbio_v7_2_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
}
-static void nbio_v7_2_hdp_flush(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
static u32 nbio_v7_2_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_CONFIG_MEMSIZE);
@@ -325,7 +316,6 @@ const struct amdgpu_nbio_funcs nbio_v7_2_funcs = {
.get_pcie_port_data_offset = nbio_v7_2_get_pcie_port_data_offset,
.get_rev_id = nbio_v7_2_get_rev_id,
.mc_access_enable = nbio_v7_2_mc_access_enable,
- .hdp_flush = nbio_v7_2_hdp_flush,
.get_memsize = nbio_v7_2_get_memsize,
.sdma_doorbell_range = nbio_v7_2_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v7_2_vcn_doorbell_range,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index eadc9526d33f..4bc1d1434065 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -82,15 +82,6 @@ static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
@@ -541,7 +532,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
.get_rev_id = nbio_v7_4_get_rev_id,
.mc_access_enable = nbio_v7_4_mc_access_enable,
- .hdp_flush = nbio_v7_4_hdp_flush,
.get_memsize = nbio_v7_4_get_memsize,
.sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range,
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 6bee3677394a..b9722282d1b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -38,8 +38,6 @@
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
-#include "hdp/hdp_5_0_0_offset.h"
-#include "hdp/hdp_5_0_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"
#include "mp/mp_11_0_offset.h"
@@ -50,6 +48,7 @@
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v7_2.h"
+#include "hdp_v5_0.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
@@ -336,6 +335,38 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
return ret;
}
+static int nv_asic_mode2_reset(struct amdgpu_device *adev)
+{
+ u32 i;
+ int ret = 0;
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+
+ amdgpu_device_cache_pci_state(adev->pdev);
+
+ ret = amdgpu_dpm_mode2_reset(adev);
+ if (ret)
+ dev_err(adev->dev, "GPU mode2 reset failed\n");
+
+ amdgpu_device_load_pci_state(adev->pdev);
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
+
+ if (memsize != 0xffffffff)
+ break;
+ udelay(1);
+ }
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
+ return ret;
+}
+
static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
struct smu_context *smu = &adev->smu;
@@ -352,6 +383,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
struct smu_context *smu = &adev->smu;
if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
+ amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
amdgpu_reset_method == AMD_RESET_METHOD_BACO)
return amdgpu_reset_method;
@@ -360,6 +392,8 @@ nv_asic_reset_method(struct amdgpu_device *adev)
amdgpu_reset_method);
switch (adev->asic_type) {
+ case CHIP_VANGOGH:
+ return AMD_RESET_METHOD_MODE2;
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
@@ -377,7 +411,8 @@ static int nv_asic_reset(struct amdgpu_device *adev)
int ret = 0;
struct smu_context *smu = &adev->smu;
- if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+ switch (nv_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_BACO:
dev_info(adev->dev, "BACO reset\n");
ret = smu_baco_enter(smu);
@@ -386,9 +421,15 @@ static int nv_asic_reset(struct amdgpu_device *adev)
ret = smu_baco_exit(smu);
if (ret)
return ret;
- } else {
+ break;
+ case AMD_RESET_METHOD_MODE2:
+ dev_info(adev->dev, "MODE2 reset\n");
+ ret = nv_asic_mode2_reset(adev);
+ break;
+ default:
dev_info(adev->dev, "MODE1 reset\n");
ret = nv_asic_mode1_reset(adev);
+ break;
}
return ret;
@@ -514,6 +555,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.funcs = &nbio_v2_3_funcs;
adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
}
+ adev->hdp.funcs = &hdp_v5_0_funcs;
if (adev->asic_type == CHIP_SIENNA_CICHLID)
adev->gmc.xgmi.supported = true;
@@ -669,22 +711,6 @@ static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
return adev->nbio.funcs->get_rev_id(adev);
}
-static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
-{
- adev->nbio.funcs->hdp_flush(adev, ring);
-}
-
-static void nv_invalidate_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
- } else {
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
- }
-}
-
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
return true;
@@ -788,8 +814,6 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.set_uvd_clocks = &nv_set_uvd_clocks,
.set_vce_clocks = &nv_set_vce_clocks,
.get_config_memsize = &nv_get_config_memsize,
- .flush_hdp = &nv_flush_hdp,
- .invalidate_hdp = &nv_invalidate_hdp,
.init_doorbell_index = &nv_init_doorbell_index,
.need_full_reset = &nv_need_full_reset,
.need_reset_on_init = &nv_need_reset_on_init,
@@ -1080,120 +1104,6 @@ static int nv_common_soft_reset(void *handle)
return 0;
}
-static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
- bool enable)
-{
- uint32_t hdp_clk_cntl, hdp_clk_cntl1;
- uint32_t hdp_mem_pwr_cntl;
-
- if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
- AMD_CG_SUPPORT_HDP_DS |
- AMD_CG_SUPPORT_HDP_SD)))
- return;
-
- hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
- hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
-
- /* Before doing clock/power mode switch,
- * forced on IPH & RC clock */
- hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
- IPH_MEM_CLK_SOFT_OVERRIDE, 1);
- hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
- RC_MEM_CLK_SOFT_OVERRIDE, 1);
- WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
-
- /* HDP 5.0 doesn't support dynamic power mode switch,
- * disable clock and power gating before any changing */
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_CTRL_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_LS_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_DS_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_SD_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_CTRL_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_LS_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_DS_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_SD_EN, 0);
- WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
-
- /* only one clock gating mode (LS/DS/SD) can be enabled */
- if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_LS_EN, enable);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_LS_EN, enable);
- } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_DS_EN, enable);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_DS_EN, enable);
- } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_SD_EN, enable);
- /* RC should not use shut down mode, fallback to ds */
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_DS_EN, enable);
- }
-
- /* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
- * be set for SRAM LS/DS/SD */
- if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
- AMD_CG_SUPPORT_HDP_SD)) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_CTRL_EN, 1);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_CTRL_EN, 1);
- }
-
- WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
-
- /* restore IPH & RC clock override after clock/power mode changing */
- WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
-}
-
-static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
- bool enable)
-{
- uint32_t hdp_clk_cntl;
-
- if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
- return;
-
- hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
-
- if (enable) {
- hdp_clk_cntl &=
- ~(uint32_t)
- (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
- } else {
- hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
- }
-
- WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
-}
-
static int nv_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -1213,9 +1123,7 @@ static int nv_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
- nv_update_hdp_mem_power_gating(adev,
- state == AMD_CG_STATE_GATE);
- nv_update_hdp_clock_gating(adev,
+ adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
default:
@@ -1234,31 +1142,13 @@ static int nv_common_set_powergating_state(void *handle,
static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- uint32_t tmp;
if (amdgpu_sriov_vf(adev))
*flags = 0;
adev->nbio.funcs->get_clockgating_state(adev, flags);
- /* AMD_CG_SUPPORT_HDP_MGCG */
- tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
- if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
- *flags |= AMD_CG_SUPPORT_HDP_MGCG;
-
- /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
- tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
- if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
- *flags |= AMD_CG_SUPPORT_HDP_LS;
- else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
- *flags |= AMD_CG_SUPPORT_HDP_DS;
- else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
- *flags |= AMD_CG_SUPPORT_HDP_SD;
+ adev->hdp.funcs->get_clock_gating_state(adev, flags);
return;
}
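Throughout nv.c (and soc15.c below), the open-coded HDP flush, invalidate and clock-gating helpers are dropped in favour of a per-ASIC callback table on adev->hdp.funcs (hdp_v5_0_funcs here, hdp_v4_0_funcs on SOC15 parts). The sketch below collects the call sites as they appear after this patch; the amdgpu_hdp_funcs definition itself is not part of this diff, so the signatures are inferred only from these usages.

/* Inferred from the call sites in this patch, not from hdp_v4_0.h/hdp_v5_0.h. */
adev->hdp.funcs->flush_hdp(adev, NULL);			/* replaces nbio->hdp_flush() */
adev->hdp.funcs->update_clock_gating(adev, state == AMD_CG_STATE_GATE);
adev->hdp.funcs->get_clock_gating_state(adev, flags);
if (adev->hdp.funcs->reset_ras_error_count)
	adev->hdp.funcs->reset_ras_error_count(adev);	/* replaces vega20_reset_hdp_ras_error_count() */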
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index d65a5339d354..3ba7bdfde65d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -47,7 +47,7 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
GFX_CTRL_CMD_ID_GBR_IH_SET = 0x00080000, /* set Gbr IH_RB_CNTL registers */
- GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */
+ GFX_CTRL_CMD_ID_CONSUME_CMD = 0x00090000, /* send interrupt to psp for updating write pointer of vf */
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index d7f92634eba2..4b1cc5e9ee92 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -92,8 +92,6 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
(uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
- adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
-
adev->psp.ta_dtm_ucode_version =
le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
adev->psp.ta_dtm_ucode_size =
@@ -101,6 +99,16 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
adev->psp.ta_dtm_start_addr =
(uint8_t *)adev->psp.ta_hdcp_start_addr +
le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+
+ adev->psp.ta_securedisplay_ucode_version =
+ le32_to_cpu(ta_hdr->ta_securedisplay_ucode_version);
+ adev->psp.ta_securedisplay_ucode_size =
+ le32_to_cpu(ta_hdr->ta_securedisplay_size_bytes);
+ adev->psp.ta_securedisplay_start_addr =
+ (uint8_t *)adev->psp.ta_hdcp_start_addr +
+ le32_to_cpu(ta_hdr->ta_securedisplay_offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index bd4248c93c49..c325d6f53a71 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -392,37 +392,6 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
return ret;
}
-static void psp_v11_0_reroute_ih(struct psp_context *psp)
-{
- struct amdgpu_device *adev = psp->adev;
- uint32_t tmp;
-
- /* Change IH ring for VMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-
- /* Change IH ring for UMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-}
-
static int psp_v11_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -430,11 +399,6 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
- if ((!amdgpu_sriov_vf(adev)) &&
- !(adev->asic_type >= CHIP_SIENNA_CICHLID &&
- adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
- psp_v11_0_reroute_ih(psp);
-
ring = &psp->km_ring;
ring->ring_type = ring_type;
@@ -726,7 +690,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
vfree(buf);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index ce56e93c6886..c8c22c1d1e65 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -46,7 +46,6 @@
#include "sdma6/sdma6_4_2_2_sh_mask.h"
#include "sdma7/sdma7_4_2_2_offset.h"
#include "sdma7/sdma7_4_2_2_sh_mask.h"
-#include "hdp/hdp_4_0_offset.h"
#include "sdma0/sdma0_4_1_default.h"
#include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index b208b81005bb..d345e324837d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -32,7 +32,6 @@
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
-#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index f1ba36a094da..690a5090475a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -119,15 +119,7 @@ static int sdma_v5_2_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
static void sdma_v5_2_destroy_inst_ctx(struct amdgpu_device *adev)
{
- int i;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
-
- if (adev->asic_type == CHIP_SIENNA_CICHLID)
- break;
- }
+ release_firmware(adev->sdma.instance[0].fw);
memset((void *)adev->sdma.instance, 0,
sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
@@ -185,23 +177,10 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
- for (i = 1; i < adev->sdma.num_instances; i++) {
- if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
- adev->asic_type <= CHIP_DIMGREY_CAVEFISH) {
- memcpy((void *)&adev->sdma.instance[i],
- (void *)&adev->sdma.instance[0],
- sizeof(struct amdgpu_sdma_instance));
- } else {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]);
- if (err)
- goto out;
- }
- }
+ for (i = 1; i < adev->sdma.num_instances; i++)
+ memcpy((void *)&adev->sdma.instance[i],
+ (void *)&adev->sdma.instance[0],
+ sizeof(struct amdgpu_sdma_instance));
DRM_DEBUG("psp_load == '%s'\n",
adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8a23636ecc27..2396be16c28e 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -40,8 +40,6 @@
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
-#include "hdp/hdp_4_0_offset.h"
-#include "hdp/hdp_4_0_sh_mask.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
@@ -59,7 +57,9 @@
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
+#include "hdp_v4_0.h"
#include "vega10_ih.h"
+#include "vega20_ih.h"
#include "navi10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
@@ -83,14 +83,6 @@
#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
-/* for Vega20 register name change */
-#define mmHDP_MEM_POWER_CTRL 0x00d4
-#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
-#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
-#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
-#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
-#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
-
/*
* Indirect registers accessor
*/
@@ -699,6 +691,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.funcs = &nbio_v6_1_funcs;
adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
}
+ adev->hdp.funcs = &hdp_v4_0_funcs;
if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
adev->df.funcs = &df_v3_6_funcs;
@@ -729,12 +722,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
}
if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
else
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
} else {
if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
else
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
@@ -787,9 +780,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev)) {
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
} else {
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
}
@@ -834,35 +827,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
return 0;
}
-static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
-{
- adev->nbio.funcs->hdp_flush(adev, ring);
-}
-
-static void soc15_invalidate_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
- else
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
-}
-
static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
/* change this when we implement soft reset */
return true;
}
-static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
-{
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
- return;
- /*read back hdp ras counter to reset it to 0 */
- RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
-}
-
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
uint64_t *count1)
{
@@ -1011,8 +981,6 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.set_uvd_clocks = &soc15_set_uvd_clocks,
.set_vce_clocks = &soc15_set_vce_clocks,
.get_config_memsize = &soc15_get_config_memsize,
- .flush_hdp = &soc15_flush_hdp,
- .invalidate_hdp = &soc15_invalidate_hdp,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &vega10_doorbell_index_init,
.get_pcie_usage = &soc15_get_pcie_usage,
@@ -1034,9 +1002,6 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.set_uvd_clocks = &soc15_set_uvd_clocks,
.set_vce_clocks = &soc15_set_vce_clocks,
.get_config_memsize = &soc15_get_config_memsize,
- .flush_hdp = &soc15_flush_hdp,
- .invalidate_hdp = &soc15_invalidate_hdp,
- .reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &vega20_doorbell_index_init,
.get_pcie_usage = &vega20_get_pcie_usage,
@@ -1239,7 +1204,8 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RENOIR:
adev->asic_funcs = &soc15_asic_funcs;
- if (adev->pdev->device == 0x1636)
+ if ((adev->pdev->device == 0x1636) ||
+ (adev->pdev->device == 0x164c))
adev->apu_flags |= AMD_APU_IS_RENOIR;
else
adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
@@ -1293,9 +1259,8 @@ static int soc15_common_late_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
- if (adev->asic_funcs &&
- adev->asic_funcs->reset_hdp_ras_error_count)
- adev->asic_funcs->reset_hdp_ras_error_count(adev);
+ if (adev->hdp.funcs->reset_ras_error_count)
+ adev->hdp.funcs->reset_ras_error_count(adev);
if (adev->nbio.funcs->ras_late_init)
r = adev->nbio.funcs->ras_late_init(adev);
@@ -1421,41 +1386,6 @@ static int soc15_common_soft_reset(void *handle)
return 0;
}
-static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
-{
- uint32_t def, data;
-
- if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_RENOIR) {
- def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
- data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
- HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
- HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
- HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
- else
- data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
- HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
- HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
- HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
-
- if (def != data)
- WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
- } else {
- def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
- data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
- else
- data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
-
- if (def != data)
- WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
- }
-}
-
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
uint32_t def, data;
@@ -1516,7 +1446,7 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
- soc15_update_hdp_light_sleep(adev,
+ adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
state == AMD_CG_STATE_GATE);
@@ -1533,7 +1463,7 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
- soc15_update_hdp_light_sleep(adev,
+ adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
state == AMD_CG_STATE_GATE);
@@ -1541,7 +1471,7 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE);
break;
case CHIP_ARCTURUS:
- soc15_update_hdp_light_sleep(adev,
+ adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
default:
@@ -1560,10 +1490,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
adev->nbio.funcs->get_clockgating_state(adev, flags);
- /* AMD_CG_SUPPORT_HDP_LS */
- data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
- if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
- *flags |= AMD_CG_SUPPORT_HDP_LS;
+ adev->hdp.funcs->get_clock_gating_state(adev, flags);
/* AMD_CG_SUPPORT_DRM_MGCG */
data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
new file mode 100644
index 000000000000..5039375bb1d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _TA_SECUREDISPLAY_IF_H
+#define _TA_SECUREDISPLAY_IF_H
+
+/** Secure Display related enumerations */
+/**********************************************************/
+
+/** @enum ta_securedisplay_command
+ * Secure Display Command ID
+ */
+enum ta_securedisplay_command {
+ /* Query whether the TA is responding; used only for validation purposes */
+ TA_SECUREDISPLAY_COMMAND__QUERY_TA = 1,
+ /* Send region of Interest and CRC value to I2C */
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC = 2,
+ /* Maximum Command ID */
+ TA_SECUREDISPLAY_COMMAND__MAX_ID = 0x7FFFFFFF,
+};
+
+/** @enum ta_securedisplay_status
+ * Secure Display status returns in shared buffer status
+ */
+enum ta_securedisplay_status {
+ TA_SECUREDISPLAY_STATUS__SUCCESS = 0x00, /* Success */
+ TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE = 0x01, /* Generic Failure */
+ TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER = 0x02, /* Invalid Parameter */
+ TA_SECUREDISPLAY_STATUS__NULL_POINTER = 0x03, /* Null Pointer*/
+ TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR = 0x04, /* Fail to Write to I2C */
+ TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR = 0x05, /* Fail to Read DIO Scratch Register */
+ TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR = 0x06, /* Fail to Read CRC */
+
+ TA_SECUREDISPLAY_STATUS__MAX = 0x7FFFFFFF, /* Maximum Value for status */
+};
+
+/** @enum ta_securedisplay_max_phy
+ * Physical ID number to use for reading corresponding DIO Scratch register for ROI
+ */
+enum ta_securedisplay_max_phy {
+ TA_SECUREDISPLAY_PHY0 = 0,
+ TA_SECUREDISPLAY_PHY1 = 1,
+ TA_SECUREDISPLAY_PHY2 = 2,
+ TA_SECUREDISPLAY_PHY3 = 3,
+ TA_SECUREDISPLAY_MAX_PHY = 4,
+};
+
+/** @enum ta_securedisplay_ta_query_cmd_ret
+ * A predefined specific return value (0xAB) used only to validate that
+ * communication to the Secure Display TA is functional.
+ * This value is used to validate whether TA is responding successfully
+ */
+enum ta_securedisplay_ta_query_cmd_ret {
+ /* This is a value to validate if TA is loaded successfully */
+ TA_SECUREDISPLAY_QUERY_CMD_RET = 0xAB,
+};
+
+/** @enum ta_securedisplay_buffer_size
+ * I2C Buffer size which contains 8 bytes of ROI (X start, X end, Y start, Y end)
+ * and 6 bytes of CRC (R, G, B) and 1 byte for physical ID
+ */
+enum ta_securedisplay_buffer_size {
+ /* 15 bytes = 8 byte (ROI) + 6 byte(CRC) + 1 byte(phy_id) */
+ TA_SECUREDISPLAY_I2C_BUFFER_SIZE = 15,
+};
+
+/** Input/output structures for Secure Display commands */
+/**********************************************************/
+/**
+ * Input structures
+ */
+
+/** @struct ta_securedisplay_send_roi_crc_input
+ * Physical ID to determine which DIO scratch register should be used to get ROI
+ */
+struct ta_securedisplay_send_roi_crc_input {
+ uint32_t phy_id; /* Physical ID */
+};
+
+/** @union ta_securedisplay_cmd_input
+ * Input buffer
+ */
+union ta_securedisplay_cmd_input {
+ /* send ROI and CRC input buffer format */
+ struct ta_securedisplay_send_roi_crc_input send_roi_crc;
+ uint32_t reserved[4];
+};
+
+/**
+ * Output structures
+ */
+
+/** @struct ta_securedisplay_query_ta_output
+ * Output buffer format for the query TA command (whether the TA is responding); used only for validation purposes
+ */
+struct ta_securedisplay_query_ta_output {
+ /* return value from TA when it is queried for validation purpose only */
+ uint32_t query_cmd_ret;
+};
+
+/** @struct ta_securedisplay_send_roi_crc_output
+ * Output buffer format for the send ROI CRC command, which passes back the I2C buffer created inside the TA
+ * and used to write to I2C; used only for validation purposes
+ */
+struct ta_securedisplay_send_roi_crc_output {
+ uint8_t i2c_buf[TA_SECUREDISPLAY_I2C_BUFFER_SIZE]; /* I2C buffer */
+ uint8_t reserved;
+};
+
+/** @union ta_securedisplay_cmd_output
+ * Output buffer
+ */
+union ta_securedisplay_cmd_output {
+ /* Query TA output buffer format used only for validation purpose*/
+ struct ta_securedisplay_query_ta_output query_ta;
+ /* Send ROI CRC output buffer format used only for validation purpose */
+ struct ta_securedisplay_send_roi_crc_output send_roi_crc;
+ uint32_t reserved[4];
+};
+
+/** @struct securedisplay_cmd
+ * Secure Display Command which is shared buffer memory
+ */
+struct securedisplay_cmd {
+ uint32_t cmd_id; /* +0 Bytes Command ID */
+ enum ta_securedisplay_status status; /* +4 Bytes Status of Secure Display TA */
+ uint32_t reserved[2]; /* +8 Bytes Reserved */
+ union ta_securedisplay_cmd_input securedisplay_in_message; /* +16 Bytes Input Buffer */
+ union ta_securedisplay_cmd_output securedisplay_out_message;/* +32 Bytes Output Buffer */
+ /**@note Total 48 Bytes */
+};
+
+#endif //_TA_SECUREDISPLAY_IF_H
+
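The header above fixes the 48-byte shared-buffer layout between the driver and the Secure Display TA. Below is a hedged sketch of how a caller might fill it in for the validation query; the step that actually hands the buffer to the TA goes through the PSP and is not covered by this header, so it is only stubbed out, and the helper name is hypothetical.

/* Hypothetical helper; only the struct/enum layout comes from the header above. */
static int securedisplay_query_ta_sketch(void)
{
	struct securedisplay_cmd cmd = { 0 };

	cmd.cmd_id = TA_SECUREDISPLAY_COMMAND__QUERY_TA;

	/* ...submit &cmd to the TA via the PSP here (not covered by this header)... */

	if (cmd.status != TA_SECUREDISPLAY_STATUS__SUCCESS)
		return -1;

	/* a responsive TA answers the query with the fixed 0xAB value */
	return (cmd.securedisplay_out_message.query_ta.query_cmd_ret ==
		TA_SECUREDISPLAY_QUERY_CMD_RET) ? 0 : -1;
}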
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index ce3319993b4b..249fcbee7871 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -196,19 +196,30 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
- if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
- /* When a ring buffer overflow happen start parsing interrupt
- * from the last not overwritten vector (wptr + 16). Hopefully
- * this should allow us to catchup.
- */
- dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
- ih->rptr = (wptr + 16) & ih->ptr_mask;
- tmp = RREG32(mmIH_RB_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(mmIH_RB_CNTL, tmp);
- }
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32(mmIH_RB_WPTR);
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last not-overwritten vector (wptr + 16). Hopefully
+ * this should allow us to catch up.
+ */
+
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
+ tmp = RREG32(mmIH_RB_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
+out:
return (wptr & ih->ptr_mask);
}
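The reworked tonga_ih_get_wptr() re-reads the hardware WPTR before trusting the overflow bit, then resumes at (wptr + 16) & ptr_mask so parsing restarts at the oldest vector that was not overwritten. A quick worked example of that wrap-around, with illustrative numbers only (the real ring size and mask come from amdgpu_ih_ring_init()):

/* Illustrative only: overflow recovery on a 4 KiB ring (ptr_mask = 0xFFF). */
uint32_t ptr_mask = 4096 - 1;
uint32_t wptr     = 4090;                    /* overflow seen near the end of the ring */
uint32_t rptr     = (wptr + 16) & ptr_mask;  /* = 10: wrapped back past the start */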
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 312ecf6d24a0..7cd67cb2ac5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -36,7 +36,6 @@
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
-#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index c734e31a9e65..6117931fa8d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -32,7 +32,6 @@
#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
-#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index e5ae31eb744e..88626d83e07b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -38,132 +38,120 @@
static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
/**
- * vega10_ih_enable_interrupts - Enable the interrupt ring buffer
+ * vega10_ih_init_register_offset - Initialize register offset for ih rings
*
* @adev: amdgpu_device pointer
*
- * Enable the interrupt ring buffer (VEGA10).
+ * Initialize register offsets for the ih rings (VEGA10).
*/
-static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
+static void vega10_ih_init_register_offset(struct amdgpu_device *adev)
{
- u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ struct amdgpu_ih_regs *ih_regs;
+
+ if (adev->irq.ih.ring_size) {
+ ih_regs = &adev->irq.ih.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
+ ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
+ ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
}
- adev->irq.ih.enabled = true;
if (adev->irq.ih1.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
- RB_ENABLE, 1);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
- }
- adev->irq.ih1.enabled = true;
+ ih_regs = &adev->irq.ih1.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
}
if (adev->irq.ih2.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
- RB_ENABLE, 1);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
- }
- adev->irq.ih2.enabled = true;
+ ih_regs = &adev->irq.ih2.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
}
-
- if (adev->irq.ih_soft.ring_size)
- adev->irq.ih_soft.enabled = true;
}
/**
- * vega10_ih_disable_interrupts - Disable the interrupt ring buffer
+ * vega10_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
*
* @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
*
- * Disable the interrupt ring buffer (VEGA10).
+ * Toggle the interrupt ring buffer (VEGA10)
*/
-static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
+static int vega10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ bool enable)
{
- u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
+ ih_regs = &ih->ih_regs;
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ /* enable_intr field is only valid in ring0 */
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return;
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
}
} else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ WREG32(ih_regs->ih_rb_cntl, tmp);
}
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
- adev->irq.ih.enabled = false;
- adev->irq.ih.rptr = 0;
-
- if (adev->irq.ih1.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
- RB_ENABLE, 0);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
- }
+ if (enable) {
+ ih->enabled = true;
+ } else {
/* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
- adev->irq.ih1.enabled = false;
- adev->irq.ih1.rptr = 0;
+ WREG32(ih_regs->ih_rb_rptr, 0);
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ ih->enabled = false;
+ ih->rptr = 0;
}
- if (adev->irq.ih2.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
- RB_ENABLE, 0);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
- }
+ return 0;
+}
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
- adev->irq.ih2.enabled = false;
- adev->irq.ih2.rptr = 0;
+/**
+ * vega10_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
+ *
+ * Toggle all the available interrupt ring buffers (VEGA10).
+ */
+static int vega10_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ int i;
+ int r;
+
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ r = vega10_ih_toggle_ring_interrupts(adev, ih[i], enable);
+ if (r)
+ return r;
+ }
}
+
+ return 0;
}
static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
@@ -209,6 +197,58 @@ static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
}
/**
+ * vega10_ih_enable_ring - enable an ih ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Enable an ih ring buffer (VEGA10)
+ */
+static int vega10_ih_enable_ring(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
+
+ ih_regs = &ih->ih_regs;
+
+ /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
+ WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+ WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = vega10_ih_rb_cntl(ih, tmp);
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+ if (ih == &adev->irq.ih1) {
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+ }
+ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ if (ih == &adev->irq.ih) {
+ /* set the ih ring 0 writeback address whether it's enabled or not */
+ WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+ WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+ }
+
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ WREG32(ih_regs->ih_rb_rptr, 0);
+
+ WREG32(ih_regs->ih_doorbell_rptr, vega10_ih_doorbell_rptr(ih));
+
+ return 0;
+}
+
+/**
* vega10_ih_irq_init - init and enable the interrupt ring
*
* @adev: amdgpu_device pointer
@@ -221,116 +261,34 @@ static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
*/
static int vega10_ih_irq_init(struct amdgpu_device *adev)
{
- struct amdgpu_ih_ring *ih;
- u32 ih_rb_cntl, ih_chicken;
- int ret = 0;
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ u32 ih_chicken;
+ int ret;
+ int i;
u32 tmp;
/* disable irqs */
- vega10_ih_disable_interrupts(adev);
+ ret = vega10_ih_toggle_interrupts(adev, false);
+ if (ret)
+ return ret;
adev->nbio.funcs->ih_control(adev);
- ih = &adev->irq.ih;
- /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
- ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
- !!adev->irq.msi_enabled);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
- }
-
- if ((adev->asic_type == CHIP_ARCTURUS &&
- adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
- adev->asic_type == CHIP_RENOIR) {
+ if (adev->asic_type == CHIP_RENOIR) {
ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
if (adev->irq.ih.use_bus_addr) {
ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
MC_SPACE_GPA_ENABLE, 1);
- } else {
- ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
- MC_SPACE_FBPA_ENABLE, 1);
}
WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
}
- /* set the writeback address whether it's enabled or not */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
- lower_32_bits(ih->wptr_addr));
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
- upper_32_bits(ih->wptr_addr) & 0xFFFF);
-
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
- vega10_ih_doorbell_rptr(ih));
-
- ih = &adev->irq.ih1;
- if (ih->ring_size) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
- (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
- WPTR_OVERFLOW_ENABLE, 0);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
- RB_FULL_DRAIN_ENABLE, 1);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ ret = vega10_ih_enable_ring(adev, ih[i]);
+ if (ret)
+ return ret;
}
-
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
- vega10_ih_doorbell_rptr(ih));
- }
-
- ih = &adev->irq.ih2;
- if (ih->ring_size) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
- (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
-
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
- }
-
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
- vega10_ih_doorbell_rptr(ih));
}
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
@@ -345,9 +303,14 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
pci_set_master(adev->pdev);
/* enable interrupts */
- vega10_ih_enable_interrupts(adev);
+ ret = vega10_ih_toggle_interrupts(adev, true);
+ if (ret)
+ return ret;
- return ret;
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
+ return 0;
}
/**
@@ -359,7 +322,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
*/
static void vega10_ih_irq_disable(struct amdgpu_device *adev)
{
- vega10_ih_disable_interrupts(adev);
+ vega10_ih_toggle_interrupts(adev, false);
/* Wait and acknowledge irq */
mdelay(1);
@@ -379,25 +342,17 @@ static void vega10_ih_irq_disable(struct amdgpu_device *adev)
static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
- u32 wptr, reg, tmp;
+ u32 wptr, tmp;
+ struct amdgpu_ih_regs *ih_regs;
wptr = le32_to_cpu(*ih->wptr_cpu);
+ ih_regs = &ih->ih_regs;
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
/* Double check that the overflow wasn't already cleared. */
-
- if (ih == &adev->irq.ih)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
- else if (ih == &adev->irq.ih1)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
- else if (ih == &adev->irq.ih2)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
- else
- BUG();
-
- wptr = RREG32_NO_KIQ(reg);
+ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -413,69 +368,15 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
wptr, ih->rptr, tmp);
ih->rptr = tmp;
- if (ih == &adev->irq.ih)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
- else if (ih == &adev->irq.ih1)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- else if (ih == &adev->irq.ih2)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- else
- BUG();
-
- tmp = RREG32_NO_KIQ(reg);
+ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32_NO_KIQ(reg, tmp);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
/**
- * vega10_ih_decode_iv - decode an interrupt vector
- *
- * @adev: amdgpu_device pointer
- * @ih: IH ring buffer to decode
- * @entry: IV entry to place decoded information into
- *
- * Decodes the interrupt vector at the current rptr
- * position and also advance the position.
- */
-static void vega10_ih_decode_iv(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih,
- struct amdgpu_iv_entry *entry)
-{
- /* wptr/rptr are in bytes! */
- u32 ring_index = ih->rptr >> 2;
- uint32_t dw[8];
-
- dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
- dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
- dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
- dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
- dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
- dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
- dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
- dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
-
- entry->client_id = dw[0] & 0xff;
- entry->src_id = (dw[0] >> 8) & 0xff;
- entry->ring_id = (dw[0] >> 16) & 0xff;
- entry->vmid = (dw[0] >> 24) & 0xf;
- entry->vmid_src = (dw[0] >> 31);
- entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
- entry->timestamp_src = dw[2] >> 31;
- entry->pasid = dw[3] & 0xffff;
- entry->pasid_src = dw[3] >> 31;
- entry->src_data[0] = dw[4];
- entry->src_data[1] = dw[5];
- entry->src_data[2] = dw[6];
- entry->src_data[3] = dw[7];
-
- /* wptr/rptr are in bytes! */
- ih->rptr += 32;
-}
-
-/**
* vega10_ih_irq_rearm - rearm IRQ if lost
*
* @adev: amdgpu_device pointer
@@ -485,22 +386,14 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
- uint32_t reg_rptr = 0;
uint32_t v = 0;
uint32_t i = 0;
+ struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
- else if (ih == &adev->irq.ih1)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
- else if (ih == &adev->irq.ih2)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
- else
- return;
-
+ ih_regs = &ih->ih_regs;
/* Rearm IRQ / re-write doorbell if doorbell write is lost */
for (i = 0; i < MAX_REARM_RETRY; i++) {
- v = RREG32_NO_KIQ(reg_rptr);
+ v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
if ((v < ih->ring_size) && (v != ih->rptr))
WDOORBELL32(ih->doorbell_index, ih->rptr);
else
@@ -519,6 +412,8 @@ static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
static void vega10_ih_set_rptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
+ struct amdgpu_ih_regs *ih_regs;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
@@ -526,12 +421,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
vega10_ih_irq_rearm(adev, ih);
- } else if (ih == &adev->irq.ih) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
- } else if (ih == &adev->irq.ih1) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
- } else if (ih == &adev->irq.ih2) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
+ } else {
+ ih_regs = &ih->ih_regs;
+ WREG32(ih_regs->ih_rb_rptr, ih->rptr);
}
}
@@ -600,19 +492,23 @@ static int vega10_ih_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
- if (r)
- return r;
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ if (r)
+ return r;
- adev->irq.ih1.use_doorbell = true;
- adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
- if (r)
- return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
- adev->irq.ih2.use_doorbell = true;
- adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ }
+ /* initialize ih control registers offset */
+ vega10_ih_init_register_offset(adev);
r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
if (r)
@@ -628,6 +524,7 @@ static int vega10_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
amdgpu_ih_ring_fini(adev, &adev->irq.ih);
@@ -698,15 +595,11 @@ static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev,
def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
field_val = enable ? 0 : 1;
/**
- * Vega10 does not have IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE
- * and IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field.
+ * Vega10/12 and RAVEN don't have IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field.
*/
- if (adev->asic_type > CHIP_VEGA10) {
- data = REG_SET_FIELD(data, IH_CLK_CTRL,
- IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
+ if (adev->asic_type == CHIP_RENOIR)
data = REG_SET_FIELD(data, IH_CLK_CTRL,
IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
- }
data = REG_SET_FIELD(data, IH_CLK_CTRL,
DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
@@ -759,7 +652,7 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
.get_wptr = vega10_ih_get_wptr,
- .decode_iv = vega10_ih_decode_iv,
+ .decode_iv = amdgpu_ih_decode_iv_helper,
.set_rptr = vega10_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
new file mode 100644
index 000000000000..5a3c867d5881
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -0,0 +1,703 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "soc15.h"
+
+#include "oss/osssys_4_2_0_offset.h"
+#include "oss/osssys_4_2_0_sh_mask.h"
+
+#include "soc15_common.h"
+#include "vega20_ih.h"
+
+#define MAX_REARM_RETRY 10
+
+static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+/**
+ * vega20_ih_init_register_offset - Initialize register offset for ih rings
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the register offsets for the ih rings (VEGA20).
+ */
+static void vega20_ih_init_register_offset(struct amdgpu_device *adev)
+{
+ struct amdgpu_ih_regs *ih_regs;
+
+ if (adev->irq.ih.ring_size) {
+ ih_regs = &adev->irq.ih.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
+ ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
+ ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
+ }
+
+ if (adev->irq.ih1.ring_size) {
+ ih_regs = &adev->irq.ih1.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
+ }
+
+ if (adev->irq.ih2.ring_size) {
+ ih_regs = &adev->irq.ih2.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
+ }
+}
+
+/**
+ * vega20_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
+ *
+ * Toggle the interrupt ring buffer (VEGA20)
+ */
+static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ bool enable)
+{
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
+
+ ih_regs = &ih->ih_regs;
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ /* enable_intr field is only valid in ring0 */
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
+ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ if (enable) {
+ ih->enabled = true;
+ } else {
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_rptr, 0);
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ ih->enabled = false;
+ ih->rptr = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * vega20_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
+ *
+ * Toggle all the available interrupt ring buffers (VEGA20).
+ */
+static int vega20_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ int i;
+ int r;
+
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ r = vega20_ih_toggle_ring_interrupts(adev, ih[i], enable);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static uint32_t vega20_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
+{
+ int rb_bufsz = order_base_2(ih->ring_size / 4);
+
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ MC_SPACE, ih->use_bus_addr ? 1 : 4);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_CLEAR, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_ENABLE, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
+ * value is written to memory
+ */
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_WRITEBACK_ENABLE, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
+
+ return ih_rb_cntl;
+}
+
+static uint32_t vega20_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
+{
+ u32 ih_doorbell_rtpr = 0;
+
+ if (ih->use_doorbell) {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR, OFFSET,
+ ih->doorbell_index);
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 1);
+ } else {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 0);
+ }
+ return ih_doorbell_rtpr;
+}
+
+/**
+ * vega20_ih_enable_ring - enable an ih ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Enable an ih ring buffer (VEGA20)
+ */
+static int vega20_ih_enable_ring(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
+
+ ih_regs = &ih->ih_regs;
+
+ /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
+ WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+ WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = vega20_ih_rb_cntl(ih, tmp);
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+ if (ih == &adev->irq.ih1) {
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+ }
+ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ if (ih == &adev->irq.ih) {
+ /* set the ih ring 0 writeback address whether it's enabled or not */
+ WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+ WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+ }
+
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ WREG32(ih_regs->ih_rb_rptr, 0);
+
+ WREG32(ih_regs->ih_doorbell_rptr, vega20_ih_doorbell_rptr(ih));
+
+ return 0;
+}
+
+/**
+ * vega20_ih_reroute_ih - reroute VMC/UTCL2 ih to an ih ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Reroute VMC and UTCL2 interrupts on the primary ih ring to
+ * ih ring 1 so they are not lost when bursts of page fault
+ * interrupts overwhelm the interrupt handler (VEGA20)
+ */
+static void vega20_ih_reroute_ih(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* vega20 ih reroute will go through psp
+ * this function is only used for arcturus
+ */
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ /* Reroute to IH ring 1 for VMC */
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+
+ /* Reroute IH ring 1 for UTCL2 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+ }
+}
+
+/**
+ * vega20_ih_irq_init - init and enable the interrupt ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate a ring buffer for the interrupt controller,
+ * enable the RLC, disable interrupts, then enable the IH
+ * ring buffer (VEGA20).
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int vega20_ih_irq_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ u32 ih_chicken;
+ int ret;
+ int i;
+ u32 tmp;
+
+ /* disable irqs */
+ ret = vega20_ih_toggle_interrupts(adev, false);
+ if (ret)
+ return ret;
+
+ adev->nbio.funcs->ih_control(adev);
+
+ if (adev->asic_type == CHIP_ARCTURUS &&
+ adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
+ if (adev->irq.ih.use_bus_addr) {
+ ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+ MC_SPACE_GPA_ENABLE, 1);
+ }
+ WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ if (i == 1)
+ vega20_ih_reroute_ih(adev);
+ ret = vega20_ih_enable_ring(adev, ih[i]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
+ CLIENT18_IS_STORM_CLIENT, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
+
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
+
+ pci_set_master(adev->pdev);
+
+ /* enable interrupts */
+ ret = vega20_ih_toggle_interrupts(adev, true);
+ if (ret)
+ return ret;
+
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
+ return 0;
+}
+
+/**
+ * vega20_ih_irq_disable - disable interrupts
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts on the hw (VEGA20).
+ */
+static void vega20_ih_irq_disable(struct amdgpu_device *adev)
+{
+ vega20_ih_toggle_interrupts(adev, false);
+
+ /* Wait and acknowledge irq */
+ mdelay(1);
+}
+
+/**
+ * vega20_ih_get_wptr - get the IH ring buffer wptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer (VEGA20). Also check for
+ * ring buffer overflow and deal with it.
+ * Returns the value of the wptr.
+ */
+static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ u32 wptr, tmp;
+ struct amdgpu_ih_regs *ih_regs;
+
+ wptr = le32_to_cpu(*ih->wptr_cpu);
+ ih_regs = &ih->ih_regs;
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last not overwritten vector (wptr + 32). Hopefully
+ * this should allow us to catch up.
+ */
+ tmp = (wptr + 32) & ih->ptr_mask;
+ dev_warn(adev->dev, "IH ring buffer overflow "
+ "(0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, tmp);
+ ih->rptr = tmp;
+
+ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+out:
+ return (wptr & ih->ptr_mask);
+}
+
+/**
+ * vega20_ih_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ */
+static void vega20_ih_irq_rearm(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t v = 0;
+ uint32_t i = 0;
+ struct amdgpu_ih_regs *ih_regs;
+
+ ih_regs = &ih->ih_regs;
+
+ /* Rearm IRQ / re-write doorbell if doorbell write is lost */
+ for (i = 0; i < MAX_REARM_RETRY; i++) {
+ v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
+ if ((v < ih->ring_size) && (v != ih->rptr))
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+ else
+ break;
+ }
+}
+
+/**
+ * vega20_ih_set_rptr - set the IH ring buffer rptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Set the IH ring buffer rptr.
+ */
+static void vega20_ih_set_rptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ struct amdgpu_ih_regs *ih_regs;
+
+ if (ih->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ *ih->rptr_cpu = ih->rptr;
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+ if (amdgpu_sriov_vf(adev))
+ vega20_ih_irq_rearm(adev, ih);
+ } else {
+ ih_regs = &ih->ih_regs;
+ WREG32(ih_regs->ih_rb_rptr, ih->rptr);
+ }
+}
+
+/**
+ * vega20_ih_self_irq - dispatch work for ring 1 and 2
+ *
+ * @adev: amdgpu_device pointer
+ * @source: irq source
+ * @entry: IV with WPTR update
+ *
+ * Update the WPTR from the IV and schedule work to handle the entries.
+ */
+static int vega20_ih_self_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+
+ switch (entry->ring_id) {
+ case 1:
+ *adev->irq.ih1.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih1_work);
+ break;
+ case 2:
+ *adev->irq.ih2.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih2_work);
+ break;
+ default: break;
+ }
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs vega20_ih_self_irq_funcs = {
+ .process = vega20_ih_self_irq,
+};
+
+static void vega20_ih_set_self_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.self_irq.num_types = 0;
+ adev->irq.self_irq.funcs = &vega20_ih_self_irq_funcs;
+}
+
+static int vega20_ih_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ vega20_ih_set_interrupt_funcs(adev);
+ vega20_ih_set_self_irq_funcs(adev);
+ return 0;
+}
+
+static int vega20_ih_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
+ &adev->irq.self_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
+ if (r)
+ return r;
+
+ adev->irq.ih.use_doorbell = true;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+
+ /* initialize ih control registers offset */
+ vega20_ih_init_register_offset(adev);
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_init(adev);
+
+ return r;
+}
+
+static int vega20_ih_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+
+ return 0;
+}
+
+static int vega20_ih_hw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = vega20_ih_irq_init(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int vega20_ih_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ vega20_ih_irq_disable(adev);
+
+ return 0;
+}
+
+static int vega20_ih_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return vega20_ih_hw_fini(adev);
+}
+
+static int vega20_ih_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return vega20_ih_hw_init(adev);
+}
+
+static bool vega20_ih_is_idle(void *handle)
+{
+ /* todo */
+ return true;
+}
+
+static int vega20_ih_wait_for_idle(void *handle)
+{
+ /* todo */
+ return -ETIMEDOUT;
+}
+
+static int vega20_ih_soft_reset(void *handle)
+{
+ /* todo */
+
+ return 0;
+}
+
+static void vega20_ih_update_clockgating_state(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def, field_val;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
+ def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
+ field_val = enable ? 0 : 1;
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DYN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ REG_CLK_SOFT_OVERRIDE, field_val);
+ if (def != data)
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
+ }
+}
+
+static int vega20_ih_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ vega20_ih_update_clockgating_state(adev,
+ state == AMD_CG_STATE_GATE);
+ return 0;
+
+}
+
+static int vega20_ih_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs vega20_ih_ip_funcs = {
+ .name = "vega20_ih",
+ .early_init = vega20_ih_early_init,
+ .late_init = NULL,
+ .sw_init = vega20_ih_sw_init,
+ .sw_fini = vega20_ih_sw_fini,
+ .hw_init = vega20_ih_hw_init,
+ .hw_fini = vega20_ih_hw_fini,
+ .suspend = vega20_ih_suspend,
+ .resume = vega20_ih_resume,
+ .is_idle = vega20_ih_is_idle,
+ .wait_for_idle = vega20_ih_wait_for_idle,
+ .soft_reset = vega20_ih_soft_reset,
+ .set_clockgating_state = vega20_ih_set_clockgating_state,
+ .set_powergating_state = vega20_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs vega20_ih_funcs = {
+ .get_wptr = vega20_ih_get_wptr,
+ .decode_iv = amdgpu_ih_decode_iv_helper,
+ .set_rptr = vega20_ih_set_rptr
+};
+
+static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.ih_funcs = &vega20_ih_funcs;
+}
+
+const struct amdgpu_ip_block_version vega20_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &vega20_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.h b/drivers/gpu/drm/amd/amdgpu/vega20_ih.h
new file mode 100644
index 000000000000..7af6d8758ee3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VEGA20_IH_H__
+#define __VEGA20_IH_H__
+
+extern const struct amd_ip_funcs vega20_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version vega20_ih_ip_block;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 8cac497c2c45..a5640a6138cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1040,11 +1040,14 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
(struct crat_subtype_iolink *)sub_type_hdr);
if (ret < 0)
return ret;
- crat_table->length += (sub_type_hdr->length * entries);
- crat_table->total_entries += entries;
- sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
- sub_type_hdr->length * entries);
+ if (entries) {
+ crat_table->length += (sub_type_hdr->length * entries);
+ crat_table->total_entries += entries;
+
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length * entries);
+ }
#else
pr_info("IO link not available for non x86 platforms\n");
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 241bd6ff79f4..74a460be077b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -44,6 +44,25 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
+ /* Only handle clients we care about */
+ if (client_id != SOC15_IH_CLIENTID_GRBM_CP &&
+ client_id != SOC15_IH_CLIENTID_SDMA0 &&
+ client_id != SOC15_IH_CLIENTID_SDMA1 &&
+ client_id != SOC15_IH_CLIENTID_SDMA2 &&
+ client_id != SOC15_IH_CLIENTID_SDMA3 &&
+ client_id != SOC15_IH_CLIENTID_SDMA4 &&
+ client_id != SOC15_IH_CLIENTID_SDMA5 &&
+ client_id != SOC15_IH_CLIENTID_SDMA6 &&
+ client_id != SOC15_IH_CLIENTID_SDMA7 &&
+ client_id != SOC15_IH_CLIENTID_VMC &&
+ client_id != SOC15_IH_CLIENTID_VMC1 &&
+ client_id != SOC15_IH_CLIENTID_UTCL2 &&
+ client_id != SOC15_IH_CLIENTID_SE0SH &&
+ client_id != SOC15_IH_CLIENTID_SE1SH &&
+ client_id != SOC15_IH_CLIENTID_SE2SH &&
+ client_id != SOC15_IH_CLIENTID_SE3SH)
+ return false;
+
/* This is a known issue for gfx9. Under non HWS, pasid is not set
* in the interrupt payload, so we need to find out the pasid on our
* own.
@@ -96,17 +115,30 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
- if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
- kfd_signal_event_interrupt(pasid, context_id, 32);
- else if (source_id == SOC15_INTSRC_SDMA_TRAP)
- kfd_signal_event_interrupt(pasid, context_id & 0xfffffff, 28);
- else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
- kfd_signal_event_interrupt(pasid, context_id & 0xffffff, 24);
- else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
- kfd_signal_hw_exception_event(pasid);
- else if (client_id == SOC15_IH_CLIENTID_VMC ||
- client_id == SOC15_IH_CLIENTID_VMC1 ||
- client_id == SOC15_IH_CLIENTID_UTCL2) {
+ if (client_id == SOC15_IH_CLIENTID_GRBM_CP ||
+ client_id == SOC15_IH_CLIENTID_SE0SH ||
+ client_id == SOC15_IH_CLIENTID_SE1SH ||
+ client_id == SOC15_IH_CLIENTID_SE2SH ||
+ client_id == SOC15_IH_CLIENTID_SE3SH) {
+ if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
+ kfd_signal_event_interrupt(pasid, context_id, 32);
+ else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
+ kfd_signal_event_interrupt(pasid, context_id & 0xffffff, 24);
+ else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
+ kfd_signal_hw_exception_event(pasid);
+ } else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
+ client_id == SOC15_IH_CLIENTID_SDMA1 ||
+ client_id == SOC15_IH_CLIENTID_SDMA2 ||
+ client_id == SOC15_IH_CLIENTID_SDMA3 ||
+ client_id == SOC15_IH_CLIENTID_SDMA4 ||
+ client_id == SOC15_IH_CLIENTID_SDMA5 ||
+ client_id == SOC15_IH_CLIENTID_SDMA6 ||
+ client_id == SOC15_IH_CLIENTID_SDMA7) {
+ if (source_id == SOC15_INTSRC_SDMA_TRAP)
+ kfd_signal_event_interrupt(pasid, context_id & 0xfffffff, 28);
+ } else if (client_id == SOC15_IH_CLIENTID_VMC ||
+ client_id == SOC15_IH_CLIENTID_VMC1 ||
+ client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 797b5d4b43e5..e509a175ed17 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -6,7 +6,7 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
select SND_HDA_COMPONENT if SND_HDA_CORE
- select DRM_AMD_DC_DCN if (X86 || PPC64 || (ARM64 && KERNEL_MODE_NEON)) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+ select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 519080e9a233..c64e469ac6b0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -60,7 +60,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
@@ -939,41 +938,6 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
}
#endif
-#ifdef CONFIG_DEBUG_FS
-static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
-{
- dm->crc_win_x_start_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_X_START", 0, U16_MAX);
- if (!dm->crc_win_x_start_property)
- return -ENOMEM;
-
- dm->crc_win_y_start_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_Y_START", 0, U16_MAX);
- if (!dm->crc_win_y_start_property)
- return -ENOMEM;
-
- dm->crc_win_x_end_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_X_END", 0, U16_MAX);
- if (!dm->crc_win_x_end_property)
- return -ENOMEM;
-
- dm->crc_win_y_end_property =
- drm_property_create_range(adev_to_drm(dm->adev),
- DRM_MODE_PROP_ATOMIC,
- "AMD_CRC_WIN_Y_END", 0, U16_MAX);
- if (!dm->crc_win_y_end_property)
- return -ENOMEM;
-
- return 0;
-}
-#endif
-
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -1121,10 +1085,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_init_callbacks(adev->dm.dc, &init_params);
}
#endif
-#ifdef CONFIG_DEBUG_FS
- if (create_crtc_crc_properties(&adev->dm))
- DRM_ERROR("amdgpu: failed to create crc property.\n");
-#endif
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
@@ -1817,6 +1777,11 @@ static int dm_suspend(void *handle)
if (amdgpu_in_reset(adev)) {
mutex_lock(&dm->dc_lock);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dc_allow_idle_optimizations(adev->dm.dc, false);
+#endif
+
dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
@@ -2386,8 +2351,7 @@ void amdgpu_dm_update_connector_after_detect(
drm_connector_update_edid_property(connector,
aconnector->edid);
- aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid);
- drm_connector_list_update(connector);
+ drm_add_edid_modes(connector, aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
@@ -3760,10 +3724,53 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
};
+static void get_min_max_dc_plane_scaling(struct drm_device *dev,
+ struct drm_framebuffer *fb,
+ int *min_downscale, int *max_upscale)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = adev->dm.dc;
+ /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
+ struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ *max_upscale = plane_cap->max_upscale_factor.nv12;
+ *min_downscale = plane_cap->max_downscale_factor.nv12;
+ break;
+
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ *max_upscale = plane_cap->max_upscale_factor.fp16;
+ *min_downscale = plane_cap->max_downscale_factor.fp16;
+ break;
+
+ default:
+ *max_upscale = plane_cap->max_upscale_factor.argb8888;
+ *min_downscale = plane_cap->max_downscale_factor.argb8888;
+ break;
+ }
+
+ /*
+ * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
+ * scaling factor of 1.0 == 1000 units.
+ */
+ if (*max_upscale == 1)
+ *max_upscale = 1000;
+
+ if (*min_downscale == 1)
+ *min_downscale = 1000;
+}
+
+
static int fill_dc_scaling_info(const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info)
{
- int scale_w, scale_h;
+ int scale_w, scale_h, min_downscale, max_upscale;
memset(scaling_info, 0, sizeof(*scaling_info));
@@ -3795,17 +3802,25 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
/* DRM doesn't specify clipping on destination output. */
scaling_info->clip_rect = scaling_info->dst_rect;
- /* TODO: Validate scaling per-format with DC plane caps */
+ /* Validate scaling per-format with DC plane caps */
+ if (state->plane && state->plane->dev && state->fb) {
+ get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
+ &min_downscale, &max_upscale);
+ } else {
+ min_downscale = 250;
+ max_upscale = 16000;
+ }
+
scale_w = scaling_info->dst_rect.width * 1000 /
scaling_info->src_rect.width;
- if (scale_w < 250 || scale_w > 16000)
+ if (scale_w < min_downscale || scale_w > max_upscale)
return -EINVAL;
scale_h = scaling_info->dst_rect.height * 1000 /
scaling_info->src_rect.height;
- if (scale_h < 250 || scale_h > 16000)
+ if (scale_h < min_downscale || scale_h > max_upscale)
return -EINVAL;
/*
@@ -5334,64 +5349,12 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->crc_src = cur->crc_src;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
-#ifdef CONFIG_DEBUG_FS
- state->crc_window = cur->crc_window;
-#endif
+
/* TODO Duplicate dc_stream after objects are stream object is flattened */
return &state->base;
}
-#ifdef CONFIG_DEBUG_FS
-static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct dm_crtc_state *dm_new_state =
- to_dm_crtc_state(crtc_state);
-
- if (property == adev->dm.crc_win_x_start_property)
- dm_new_state->crc_window.x_start = val;
- else if (property == adev->dm.crc_win_y_start_property)
- dm_new_state->crc_window.y_start = val;
- else if (property == adev->dm.crc_win_x_end_property)
- dm_new_state->crc_window.x_end = val;
- else if (property == adev->dm.crc_win_y_end_property)
- dm_new_state->crc_window.y_end = val;
- else
- return -EINVAL;
-
- return 0;
-}
-
-static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
- const struct drm_crtc_state *state,
- struct drm_property *property,
- uint64_t *val)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct dm_crtc_state *dm_state =
- to_dm_crtc_state(state);
-
- if (property == adev->dm.crc_win_x_start_property)
- *val = dm_state->crc_window.x_start;
- else if (property == adev->dm.crc_win_y_start_property)
- *val = dm_state->crc_window.y_start;
- else if (property == adev->dm.crc_win_x_end_property)
- *val = dm_state->crc_window.x_end;
- else if (property == adev->dm.crc_win_y_end_property)
- *val = dm_state->crc_window.y_end;
- else
- return -EINVAL;
-
- return 0;
-}
-#endif
-
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
@@ -5414,6 +5377,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+ struct amdgpu_display_manager *dm = &adev->dm;
int rc = 0;
if (enable) {
@@ -5429,7 +5393,32 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
return rc;
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
- return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+
+ if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+ return -EBUSY;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (amdgpu_in_reset(adev))
+ return 0;
+
+ mutex_lock(&dm->dc_lock);
+
+ if (enable)
+ dm->active_vblank_irq_count++;
+ else
+ dm->active_vblank_irq_count--;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dc_allow_idle_optimizations(
+ adev->dm.dc, dm->active_vblank_irq_count == 0 ? true : false);
+
+ DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
+#endif
+
+ mutex_unlock(&dm->dc_lock);
+
+#endif
+ return 0;
}
static int dm_enable_vblank(struct drm_crtc *crtc)
@@ -5446,7 +5435,6 @@ static void dm_disable_vblank(struct drm_crtc *crtc)
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.reset = dm_crtc_reset_state,
.destroy = amdgpu_dm_crtc_destroy,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = dm_crtc_duplicate_state,
@@ -5458,10 +5446,6 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-#ifdef CONFIG_DEBUG_FS
- .atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
- .atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
-#endif
};
static enum drm_connector_status
@@ -6425,12 +6409,26 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
static int dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state)
{
- int max_downscale = 0;
- int max_upscale = INT_MAX;
+ struct drm_framebuffer *fb = state->fb;
+ int min_downscale, max_upscale;
+ int min_scale = 0;
+ int max_scale = INT_MAX;
+
+ /* Plane enabled? Get min/max allowed scaling factors from plane caps. */
+ if (fb && state->crtc) {
+ get_min_max_dc_plane_scaling(state->crtc->dev, fb,
+ &min_downscale, &max_upscale);
+ /*
+ * Convert to drm convention: 16.16 fixed point, instead of dc's
+ * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
+ * dst/src, so min_scale = 1.0 / max_upscale, etc.
+ */
+ min_scale = (1000 << 16) / max_upscale;
+ max_scale = (1000 << 16) / min_downscale;
+ }
- /* TODO: These should be checked against DC plane caps */
return drm_atomic_helper_check_plane_state(
- state, new_crtc_state, max_downscale, max_upscale, true, true);
+ state, new_crtc_state, min_scale, max_scale, true, true);
}
static int dm_plane_atomic_check(struct drm_plane *plane,
@@ -6663,25 +6661,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
return 0;
}
-#ifdef CONFIG_DEBUG_FS
-static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
- struct amdgpu_crtc *acrtc)
-{
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_x_start_property,
- 0);
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_y_start_property,
- 0);
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_x_end_property,
- 0);
- drm_object_attach_property(&acrtc->base.base,
- dm->crc_win_y_end_property,
- 0);
-}
-#endif
-
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
uint32_t crtc_index)
@@ -6729,9 +6708,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
-#ifdef CONFIG_DEBUG_FS
- attach_crtc_crc_properties(dm, acrtc);
-#endif
+
return 0;
fail:
@@ -8368,7 +8345,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
*/
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- bool configure_crc = false;
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -8378,30 +8354,21 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_stream_retain(dm_new_crtc_state->stream);
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
manage_dm_interrupts(adev, acrtc, true);
- }
+
#ifdef CONFIG_DEBUG_FS
- if (new_crtc_state->active &&
- amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
/**
* Frontend may have changed so reapply the CRC capture
* settings for the stream.
*/
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-
- if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
- if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
- configure_crc = true;
- } else {
- if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
- configure_crc = true;
- }
- if (configure_crc)
+ if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
amdgpu_dm_crtc_configure_crc_source(
- crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
- }
+ crtc, dm_new_crtc_state,
+ dm_new_crtc_state->crc_src);
+ }
#endif
+ }
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 2ee6edb3df93..f72930c36c22 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -58,10 +58,10 @@
/* Forward declarations */
struct amdgpu_device;
struct drm_device;
-struct amdgpu_dm_irq_handler_data;
struct dc;
struct amdgpu_bo;
struct dmub_srv;
+struct dc_plane_state;
struct common_irq_params {
struct amdgpu_device *adev;
@@ -336,32 +336,13 @@ struct amdgpu_display_manager {
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
-#ifdef CONFIG_DEBUG_FS
- /**
- * @crc_win_x_start_property:
- *
- * X start of the crc calculation window
- */
- struct drm_property *crc_win_x_start_property;
- /**
- * @crc_win_y_start_property:
- *
- * Y start of the crc calculation window
- */
- struct drm_property *crc_win_y_start_property;
- /**
- * @crc_win_x_end_property:
- *
- * X end of the crc calculation window
- */
- struct drm_property *crc_win_x_end_property;
/**
- * @crc_win_y_end_property:
+ * @active_vblank_irq_count:
*
- * Y end of the crc calculation window
+ * number of currently active vblank irqs
*/
- struct drm_property *crc_win_y_end_property;
-#endif
+ uint32_t active_vblank_irq_count;
+
/**
* @mst_encoders:
*
@@ -438,25 +419,11 @@ struct amdgpu_dm_connector {
extern const struct amdgpu_ip_block_version dm_ip_block;
-struct amdgpu_framebuffer;
-struct amdgpu_display_manager;
-struct dc_validation_set;
-struct dc_plane_state;
-
struct dm_plane_state {
struct drm_plane_state base;
struct dc_plane_state *dc_state;
};
-#ifdef CONFIG_DEBUG_FS
-struct crc_rec {
- uint16_t x_start;
- uint16_t y_start;
- uint16_t x_end;
- uint16_t y_end;
- };
-#endif
-
struct dm_crtc_state {
struct drm_crtc_state base;
struct dc_stream_state *stream;
@@ -479,9 +446,6 @@ struct dm_crtc_state {
struct dc_info_packet vrr_infopacket;
int abm_level;
-#ifdef CONFIG_DEBUG_FS
- struct crc_rec crc_window;
-#endif
};
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 7b886a779a8c..66cb8730586b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -81,41 +81,6 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
return pipe_crc_sources;
}
-static void amdgpu_dm_set_crc_window_default(struct dm_crtc_state *dm_crtc_state)
-{
- dm_crtc_state->crc_window.x_start = 0;
- dm_crtc_state->crc_window.y_start = 0;
- dm_crtc_state->crc_window.x_end = 0;
- dm_crtc_state->crc_window.y_end = 0;
-}
-
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state)
-{
- bool ret = true;
-
- if ((dm_crtc_state->crc_window.x_start != 0) ||
- (dm_crtc_state->crc_window.y_start != 0) ||
- (dm_crtc_state->crc_window.x_end != 0) ||
- (dm_crtc_state->crc_window.y_end != 0))
- ret = false;
-
- return ret;
-}
-
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
- struct dm_crtc_state *dm_old_crtc_state)
-{
- bool ret = false;
-
- if ((dm_new_crtc_state->crc_window.x_start != dm_old_crtc_state->crc_window.x_start) ||
- (dm_new_crtc_state->crc_window.y_start != dm_old_crtc_state->crc_window.y_start) ||
- (dm_new_crtc_state->crc_window.x_end != dm_old_crtc_state->crc_window.x_end) ||
- (dm_new_crtc_state->crc_window.y_end != dm_old_crtc_state->crc_window.y_end))
- ret = true;
-
- return ret;
-}
-
int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
size_t *values_cnt)
@@ -140,7 +105,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dc_stream_state *stream_state = dm_crtc_state->stream;
bool enable = amdgpu_dm_is_valid_crc_source(source);
int ret = 0;
- struct crc_params *crc_window = NULL, tmp_window;
/* Configuration will be deferred to stream enable. */
if (!stream_state)
@@ -150,24 +114,8 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
/* Enable CRTC CRC generation if necessary. */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
- if (!enable)
- amdgpu_dm_set_crc_window_default(dm_crtc_state);
-
- if (!amdgpu_dm_crc_window_is_default(dm_crtc_state)) {
- crc_window = &tmp_window;
-
- tmp_window.windowa_x_start = dm_crtc_state->crc_window.x_start;
- tmp_window.windowa_y_start = dm_crtc_state->crc_window.y_start;
- tmp_window.windowa_x_end = dm_crtc_state->crc_window.x_end;
- tmp_window.windowa_y_end = dm_crtc_state->crc_window.y_end;
- tmp_window.windowb_x_start = dm_crtc_state->crc_window.x_start;
- tmp_window.windowb_y_start = dm_crtc_state->crc_window.y_start;
- tmp_window.windowb_x_end = dm_crtc_state->crc_window.x_end;
- tmp_window.windowb_y_end = dm_crtc_state->crc_window.y_end;
- }
-
if (!dc_stream_configure_crc(stream_state->ctx->dc,
- stream_state, crc_window, enable, enable)) {
+ stream_state, NULL, enable, enable)) {
ret = -EINVAL;
goto unlock;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 0235bfb246e5..f7d731797d3f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -47,9 +47,6 @@ static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source
/* amdgpu_dm_crc.c */
#ifdef CONFIG_DEBUG_FS
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state);
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
- struct dm_crtc_state *dm_old_crtc_state);
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state,
enum amdgpu_dm_pipe_crc_source source);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 11459fb09a37..d645f3e4610e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -691,7 +691,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
return size;
}
-/**
+/*
* Returns the DMCUB tracebuffer contents.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer
*/
@@ -735,7 +735,7 @@ static int dmub_tracebuffer_show(struct seq_file *m, void *data)
return 0;
}
-/**
+/*
* Returns the DMCUB firmware state contents.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_fw_state
*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index f6f487e9fe2d..3244a6ea7a65 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -25,7 +25,6 @@
#include <linux/string.h>
#include <linux/acpi.h>
-#include <linux/version.h>
#include <linux/i2c.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 8ab0b9060d2b..5b0a4a7479e2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -23,7 +23,6 @@
*
*/
-#include <linux/version.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 24ed03d8cda7..6767fab55c26 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -73,12 +73,9 @@ uint16_t fixed_point_to_int_frac(
return result;
}
-/**
-* convert_float_matrix
-* This converts a double into HW register spec defined format S2D13.
-* @param :
-* @return None
-*/
+/*
+ * convert_float_matrix - This converts a double into HW register spec defined format S2D13.
+ */
void convert_float_matrix(
uint16_t *matrix,
struct fixed31_32 *flt,
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
index b2fc4f8e6482..ad04ef98e652 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
@@ -49,20 +49,24 @@ bool is_rgb_cspace(enum dc_color_space output_color_space)
}
}
-bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_child_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
return true;
- if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ if (pipe_ctx->bottom_pipe && is_child_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ if (pipe_ctx->next_odm_pipe && is_child_pipe_tree_visible(pipe_ctx->next_odm_pipe))
return true;
return false;
}
-bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_parent_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
return true;
- if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ if (pipe_ctx->top_pipe && is_parent_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ if (pipe_ctx->prev_odm_pipe && is_parent_pipe_tree_visible(pipe_ctx->prev_odm_pipe))
return true;
return false;
}
@@ -71,9 +75,13 @@ bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
return true;
- if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ if (pipe_ctx->top_pipe && is_parent_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ if (pipe_ctx->bottom_pipe && is_child_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ if (pipe_ctx->prev_odm_pipe && is_parent_pipe_tree_visible(pipe_ctx->prev_odm_pipe))
return true;
- if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ if (pipe_ctx->next_odm_pipe && is_child_pipe_tree_visible(pipe_ctx->next_odm_pipe))
return true;
return false;
}
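The renamed helpers above make the visibility walk symmetric across MPC (top/bottom) and ODM (prev/next) links. A self-contained sketch of the child-direction recursion, using a stand-in struct rather than DC's pipe_ctx, shows why the extra ODM hop matters: a plane attached only to the second half of an ODM split is now still reported as visible.

#include <stdbool.h>
#include <stdio.h>

/* stand-in for struct pipe_ctx: just the links the visibility walk follows */
struct pipe {
	bool visible_plane;
	struct pipe *bottom_pipe;    /* MPC child */
	struct pipe *next_odm_pipe;  /* ODM child */
};

static bool child_tree_visible(const struct pipe *p)
{
	if (p->visible_plane)
		return true;
	if (p->bottom_pipe && child_tree_visible(p->bottom_pipe))
		return true;
	if (p->next_odm_pipe && child_tree_visible(p->next_odm_pipe))
		return true;
	return false;
}

int main(void)
{
	/* two-pipe ODM split: only the second half carries a visible plane */
	struct pipe second = { .visible_plane = true };
	struct pipe first  = { .next_odm_pipe = &second };

	/* without the next_odm_pipe hop this would have printed 0 */
	printf("visible: %d\n", child_tree_visible(&first));
	return 0;
}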
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
index 7c0cbf47e8ce..b061497480b8 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
@@ -30,9 +30,9 @@
bool is_rgb_cspace(enum dc_color_space output_color_space);
-bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+bool is_child_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
-bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+bool is_parent_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 23a373ca94b5..c67d21a5ee52 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -911,11 +911,11 @@ static enum bp_result get_ss_info_from_tbl(
* ver 2.1 can co-exist with SS_Info table. Expect ASIC_InternalSS_Info ver 3.1,
* there is only one entry for each signal /ss id. However, there is
* no planning of supporting multiple spread Sprectum entry for EverGreen
- * @param [in] this
- * @param [in] signal, ASSignalType to be converted to info index
- * @param [in] index, number of entries that match the converted info index
- * @param [out] ss_info, sprectrum information structure,
- * @return Bios parser result code
+ * @dcb: pointer to the DC BIOS
+ * @signal: ASSignalType to be converted to info index
+ * @index: number of entries that match the converted info index
+ * @ss_info: spectrum information structure
+ * return: Bios parser result code
*/
static enum bp_result bios_parser_get_spread_spectrum_info(
struct dc_bios *dcb,
@@ -985,10 +985,10 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
* There can not be more than 1 entry for ASIC_InternalSS_Info Ver 2.1 or
* SS_Info.
*
- * @param this
- * @param id, spread sprectrum info index
- * @param pSSinfo, sprectrum information structure,
- * @return Bios parser result code
+ * @bp: pointer to the BIOS parser
+ * @id: spread spectrum info index
+ * @ss_info: spectrum information structure
+ * return: BIOS parser result code
*/
static enum bp_result get_ss_info_from_tbl(
struct bios_parser *bp,
@@ -1011,9 +1011,10 @@ static enum bp_result get_ss_info_from_tbl(
* from the VBIOS
* There will not be multiple entry for Ver 2.1
*
- * @param id, spread sprectrum info index
- * @param pSSinfo, sprectrum information structure,
- * @return Bios parser result code
+ * @bp: pointer to the Bios parser
+ * @id: spread spectrum info index
+ * @info: spectrum information structure
+ * return: Bios parser result code
*/
static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
struct bios_parser *bp,
@@ -1076,9 +1077,10 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
* of entries that matches the id
* for, the SS_Info table, there should not be more than 1 entry match.
*
- * @param [in] id, spread sprectrum id
- * @param [out] pSSinfo, sprectrum information structure,
- * @return Bios parser result code
+ * @bp: pointer to the Bios parser
+ * @id: spread spectrum id
+ * @ss_info: spectrum information structure
+ * return: Bios parser result code
*/
static enum bp_result get_ss_info_from_ss_info_table(
struct bios_parser *bp,
@@ -1451,16 +1453,14 @@ static enum bp_result get_embedded_panel_info_v1_3(
}
/**
- * bios_parser_get_encoder_cap_info
+ * bios_parser_get_encoder_cap_info - get encoder capability
+ * information of input object id
*
- * @brief
- * Get encoder capability information of input object id
- *
- * @param object_id, Object id
- * @param object_id, encoder cap information structure
- *
- * @return Bios parser result code
+ * @dcb: pointer to the DC BIOS
+ * @object_id: object id
+ * @info: encoder cap information structure
*
+ * return: Bios parser result code
*/
static enum bp_result bios_parser_get_encoder_cap_info(
struct dc_bios *dcb,
@@ -1490,17 +1490,12 @@ static enum bp_result bios_parser_get_encoder_cap_info(
}
/**
- * get_encoder_cap_record
- *
- * @brief
- * Get encoder cap record for the object
- *
- * @param object, ATOM object
+ * get_encoder_cap_record - Get encoder cap record for the object
*
- * @return atom encoder cap record
- *
- * @note
- * search all records to find the ATOM_ENCODER_CAP_RECORD_V2 record
+ * @bp: pointer to the BIOS parser
+ * @object: ATOM object
+ * return: atom encoder cap record
+ * note: search all records to find the ATOM_ENCODER_CAP_RECORD_V2 record
*/
static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
struct bios_parser *bp,
@@ -1557,8 +1552,9 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl(
* Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table from
* the VBIOS that match the SSid (to be converted from signal)
*
- * @param[in] signal, ASSignalType to be converted to SSid
- * @return number of SS Entry that match the signal
+ * @dcb: pointer to the DC BIOS
+ * @signal: ASSignalType to be converted to SSid
+ * return: number of SS Entry that match the signal
*/
static uint32_t bios_parser_get_ss_entry_number(
struct dc_bios *dcb,
@@ -1608,10 +1604,10 @@ static uint32_t bios_parser_get_ss_entry_number(
* get_ss_entry_number_from_ss_info_tbl
* Get Number of spread spectrum entry from the SS_Info table from the VBIOS.
*
- * @note There can only be one entry for each id for SS_Info Table
- *
- * @param [in] id, spread spectrum id
- * @return number of SS Entry that match the id
+ * @bp: pointer to the BIOS parser
+ * @id: spread spectrum id
+ * return: number of SS Entry that match the id
+ * note: There can only be one entry for each id for SS_Info Table
*/
static uint32_t get_ss_entry_number_from_ss_info_tbl(
struct bios_parser *bp,
@@ -1679,8 +1675,9 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl(
* There can not be more than 1 entry for ASIC_InternalSS_Info Ver 2.1 or
* SS_Info.
*
- * @param id, spread sprectrum info index
- * @return Bios parser result code
+ * @bp: pointer to the BIOS parser
+ * @id: spread spectrum info index
+ * return: Bios parser result code
*/
static uint32_t get_ss_entry_number(struct bios_parser *bp, uint32_t id)
{
@@ -1696,8 +1693,9 @@ static uint32_t get_ss_entry_number(struct bios_parser *bp, uint32_t id)
* Ver 2.1 from the VBIOS
* There will not be multiple entry for Ver 2.1
*
- * @param id, spread sprectrum info index
- * @return number of SS Entry that match the id
+ * @bp: pointer to the BIOS parser
+ * @id: spread spectrum info index
+ * return: number of SS Entry that match the id
*/
static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
struct bios_parser *bp,
@@ -1731,8 +1729,9 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
* Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table of
* the VBIOS that matches id
*
- * @param[in] id, spread sprectrum id
- * @return number of SS Entry that match the id
+ * @bp: pointer to the BIOS parser
+ * @id: spread spectrum id
+ * return: number of SS Entry that match the id
*/
static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
struct bios_parser *bp,
@@ -1767,10 +1766,11 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
* bios_parser_get_gpio_pin_info
* Get GpioPin information of input gpio id
*
- * @param gpio_id, GPIO ID
- * @param info, GpioPin information structure
- * @return Bios parser result code
- * @note
+ * @dcb: pointer to the DC BIOS
+ * @gpio_id: GPIO ID
+ * @info: GpioPin information structure
+ * return: Bios parser result code
+ * note:
* to get the GPIO PIN INFO, we need:
* 1. get the GPIO_ID from other object table, see GetHPDInfo()
* 2. in DATA_TABLE.GPIO_Pin_LUT, search all records, to get the registerA
@@ -2197,13 +2197,10 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id)
}
/**
- * bios_parser_set_scratch_critical_state
- *
- * @brief
- * update critical state bit in VBIOS scratch register
- *
- * @param
- * bool - to set or reset state
+ * bios_parser_set_scratch_critical_state - update critical state
+ * bit in VBIOS scratch register
+ * @dcb: pointer to the DC BIOS
+ * @state: set or reset state
*/
static void bios_parser_set_scratch_critical_state(
struct dc_bios *dcb,
@@ -2222,7 +2219,7 @@ static void bios_parser_set_scratch_critical_state(
* bios_parser *bp - [in]BIOS parser handler to get master data table
* integrated_info *info - [out] store and output integrated info
*
- * @return
+ * return:
* enum bp_result - BP_RESULT_OK if information is available,
* BP_RESULT_BADBIOSTABLE otherwise.
*/
@@ -2372,7 +2369,7 @@ static enum bp_result get_integrated_info_v8(
* bios_parser *bp - [in]BIOS parser handler to get master data table
* integrated_info *info - [out] store and output integrated info
*
- * @return
+ * return:
* enum bp_result - BP_RESULT_OK if information is available,
* BP_RESULT_BADBIOSTABLE otherwise.
*/
@@ -2509,7 +2506,7 @@ static enum bp_result get_integrated_info_v9(
* bios_parser *bp - [in]BIOS parser handler to get master data table
* integrated_info *info - [out] store and output integrated info
*
- * @return
+ * return:
* enum bp_result - BP_RESULT_OK if information is available,
* BP_RESULT_BADBIOSTABLE otherwise.
*/
@@ -2585,7 +2582,7 @@ static struct integrated_info *bios_parser_create_integrated_info(
return NULL;
}
-enum bp_result update_slot_layout_info(
+static enum bp_result update_slot_layout_info(
struct dc_bios *dcb,
unsigned int i,
struct slot_layout_info *slot_layout_info,
@@ -2689,7 +2686,7 @@ enum bp_result update_slot_layout_info(
}
-enum bp_result get_bracket_layout_record(
+static enum bp_result get_bracket_layout_record(
struct dc_bios *dcb,
unsigned int bracket_layout_id,
struct slot_layout_info *slot_layout_info)
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 670c26583817..9f9fda3118d1 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -485,10 +485,11 @@ static struct atom_hpd_int_record *get_hpd_record(
* bios_parser_get_gpio_pin_info
* Get GpioPin information of input gpio id
*
- * @param gpio_id, GPIO ID
- * @param info, GpioPin information structure
- * @return Bios parser result code
- * @note
+ * @dcb: pointer to the DC BIOS
+ * @gpio_id: GPIO ID
+ * @info: GpioPin information structure
+ * return: Bios parser result code
+ * note:
* to get the GPIO PIN INFO, we need:
* 1. get the GPIO_ID from other object table, see GetHPDInfo()
* 2. in DATA_TABLE.GPIO_Pin_LUT, search all records,
@@ -801,11 +802,11 @@ static enum bp_result get_ss_info_v4_2(
* ver 3.1,
* there is only one entry for each signal /ss id. However, there is
* no planning of supporting multiple spread Sprectum entry for EverGreen
- * @param [in] this
- * @param [in] signal, ASSignalType to be converted to info index
- * @param [in] index, number of entries that match the converted info index
- * @param [out] ss_info, sprectrum information structure,
- * @return Bios parser result code
+ * @dcb: pointer to the DC BIOS
+ * @signal: ASSignalType to be converted to info index
+ * @index: number of entries that match the converted info index
+ * @ss_info: spectrum information structure
+ * return: Bios parser result code
*/
static enum bp_result bios_parser_get_spread_spectrum_info(
struct dc_bios *dcb,
@@ -1196,13 +1197,11 @@ static bool bios_parser_is_accelerated_mode(
}
/**
- * bios_parser_set_scratch_critical_state
+ * bios_parser_set_scratch_critical_state - update critical state bit
+ * in VBIOS scratch register
*
- * @brief
- * update critical state bit in VBIOS scratch register
- *
- * @param
- * bool - to set or reset state
+ * @dcb: pointer to the DC BIOS
+ * @state: set or reset state
*/
static void bios_parser_set_scratch_critical_state(
struct dc_bios *dcb,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
index 48b4ef03fc8f..5b77251e0590 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -114,18 +114,14 @@ bool dal_cmd_table_helper_controller_id_to_atom(
}
/**
-* translate_transmitter_bp_to_atom
-*
-* @brief
-* Translate the Transmitter to the corresponding ATOM BIOS value
-*
-* @param
-* input transmitter
-* output digitalTransmitter
-* // =00: Digital Transmitter1 ( UNIPHY linkAB )
-* // =01: Digital Transmitter2 ( UNIPHY linkCD )
-* // =02: Digital Transmitter3 ( UNIPHY linkEF )
-*/
+ * translate_transmitter_bp_to_atom - Translate the Transmitter to the
+ * corresponding ATOM BIOS value
+ * @t: transmitter
+ * returns: output digitalTransmitter
+ * // =00: Digital Transmitter1 ( UNIPHY linkAB )
+ * // =01: Digital Transmitter2 ( UNIPHY linkCD )
+ * // =02: Digital Transmitter3 ( UNIPHY linkEF )
+ */
uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
enum transmitter t)
{
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 7736c92d55c4..455ee2be15a3 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -128,18 +128,14 @@ bool dal_cmd_table_helper_controller_id_to_atom2(
}
/**
-* translate_transmitter_bp_to_atom
-*
-* @brief
-* Translate the Transmitter to the corresponding ATOM BIOS value
-*
-* @param
-* input transmitter
-* output digitalTransmitter
-* // =00: Digital Transmitter1 ( UNIPHY linkAB )
-* // =01: Digital Transmitter2 ( UNIPHY linkCD )
-* // =02: Digital Transmitter3 ( UNIPHY linkEF )
-*/
+ * translate_transmitter_bp_to_atom2 - Translate the Transmitter to the
+ * corresponding ATOM BIOS value
+ * @t: transmitter
+ * returns: digitalTransmitter
+ * // =00: Digital Transmitter1 ( UNIPHY linkAB )
+ * // =01: Digital Transmitter2 ( UNIPHY linkCD )
+ * // =02: Digital Transmitter3 ( UNIPHY linkEF )
+ */
uint8_t dal_cmd_table_helper_transmitter_bp_to_atom2(
enum transmitter t)
{
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 64f515d74410..f3c00f479e1c 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
calcs_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-calcs_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index ef41b287cbe2..e633f8a51edb 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -106,7 +106,6 @@ static void calculate_bandwidth(
bool lpt_enabled;
enum bw_defines sclk_message;
enum bw_defines yclk_message;
- enum bw_defines v_filter_init_mode[maximum_number_of_surfaces];
enum bw_defines tiling_mode[maximum_number_of_surfaces];
enum bw_defines surface_type[maximum_number_of_surfaces];
enum bw_defines voltage;
@@ -792,12 +791,8 @@ static void calculate_bandwidth(
data->v_filter_init[i] = bw_add(data->v_filter_init[i], bw_int_to_fixed(1));
}
if (data->stereo_mode[i] == bw_def_top_bottom) {
- v_filter_init_mode[i] = bw_def_manual;
data->v_filter_init[i] = bw_min2(data->v_filter_init[i], bw_int_to_fixed(4));
}
- else {
- v_filter_init_mode[i] = bw_def_auto;
- }
if (data->stereo_mode[i] == bw_def_top_bottom) {
data->num_lines_at_frame_start = bw_int_to_fixed(1);
}
@@ -2730,7 +2725,7 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
}
-/**
+/*
* Compare calculated (required) clocks against the clocks available at
* maximum voltage (max Performance Level).
*/
@@ -3001,13 +2996,12 @@ static bool all_displays_in_sync(const struct pipe_ctx pipe[],
return true;
}
-/**
+/*
* Return:
* true - Display(s) configuration supported.
* In this case 'calcs_output' contains data for HW programming
* false - Display(s) configuration not supported (not enough bandwidth).
*/
-
bool bw_calcs(struct dc_context *ctx,
const struct bw_calcs_dceip *dceip,
const struct bw_calcs_vbios *vbios,
@@ -3028,7 +3022,7 @@ bool bw_calcs(struct dc_context *ctx,
calcs_output->all_displays_in_sync = false;
if (data->number_of_displays != 0) {
- uint8_t yclk_lvl, sclk_lvl;
+ uint8_t yclk_lvl;
struct bw_fixed high_sclk = vbios->high_sclk;
struct bw_fixed mid1_sclk = vbios->mid1_sclk;
struct bw_fixed mid2_sclk = vbios->mid2_sclk;
@@ -3049,7 +3043,6 @@ bool bw_calcs(struct dc_context *ctx,
calculate_bandwidth(dceip, vbios, data);
yclk_lvl = data->y_clk_level;
- sclk_lvl = data->sclk_level;
calcs_output->nbp_state_change_enable =
data->nbp_state_change_enable;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index d59b380e7b7f..ff96bee57bfc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -104,13 +104,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
@@ -125,13 +118,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN30)
@@ -146,13 +132,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN301 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn301/,$(CLK_MGR_DCN301))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN301)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index f2114bc910bf..ec9dc265cde0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -257,8 +257,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr);
// always update dtos unless clock is lowered and not safe to lower
- if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 5b466f440d67..ab98c259ef69 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -251,6 +251,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
bool update_uclk = false;
+ bool p_state_change_support;
if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
return;
@@ -291,8 +292,9 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
- clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
+ p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
+ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
if (!clk_mgr_base->clks.p_state_change_support)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
index cfa8e02cf103..68942bbc7472 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
@@ -103,7 +103,7 @@ int dcn301_smu_send_msg_with_param(
/* Trigger the message transaction by writing the message ID */
REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
- result = dcn301_smu_wait_for_response(clk_mgr, 10, 1000);
+ result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000);
ASSERT(result == VBIOSSMC_Result_OK);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 58eb0d69873a..a667480cc3a3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -284,20 +284,16 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
}
/**
- *****************************************************************************
- * Function: dc_stream_adjust_vmin_vmax
+ * dc_stream_adjust_vmin_vmax:
*
- * @brief
- * Looks up the pipe context of dc_stream_state and updates the
- * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
- * Rate, which is a power-saving feature that targets reducing panel
- * refresh rate while the screen is static
+ * Looks up the pipe context of dc_stream_state and updates the
+ * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
+ * Rate, which is a power-saving feature that targets reducing panel
+ * refresh rate while the screen is static
*
- * @param [in] dc: dc reference
- * @param [in] stream: Initial dc stream state
- * @param [in] adjust: Updated parameters for vertical_total_min and
- * vertical_total_max
- *****************************************************************************
+ * @dc: dc reference
+ * @stream: Initial dc stream state
+ * @adjust: Updated parameters for vertical_total_min and vertical_total_max
*/
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state *stream,
@@ -355,6 +351,7 @@ bool dc_stream_get_crtc_position(struct dc *dc,
* @dc: DC Object
* @stream: The stream to configure CRC on.
* @enable: Enable CRC if true, disable otherwise.
+ * @crc_window: CRC window (x/y start/end) information
* @continuous: Capture CRC on every frame if true. Otherwise, only capture
* once.
*
@@ -420,7 +417,9 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
* dc_stream_get_crc() - Get CRC values for the given stream.
* @dc: DC object
* @stream: The DC stream state of the stream to get CRCs from.
- * @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
+ * @r_cr: CRC value for the first of the 3 channels stored here.
+ * @g_y: CRC value for the second of the 3 channels stored here.
+ * @b_cb: CRC value for the third of the 3 channels stored here.
*
* dc_stream_configure_crc needs to be called beforehand to enable CRCs.
* Return false if stream is not found, or if CRCs are not enabled.
@@ -803,7 +802,8 @@ static void disable_all_writeback_pipes_for_stream(
stream->writeback_info[i].wb_enabled = false;
}
-void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
+static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
+ struct dc_stream_state *stream, bool lock)
{
int i = 0;
@@ -964,19 +964,15 @@ struct dc *dc_create(const struct dc_init_data *init_params)
struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
unsigned int full_pipe_count;
- if (NULL == dc)
- goto alloc_fail;
+ if (!dc)
+ return NULL;
if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
- if (false == dc_construct_ctx(dc, init_params)) {
- dc_destruct(dc);
- goto construct_fail;
- }
+ if (!dc_construct_ctx(dc, init_params))
+ goto destruct_dc;
} else {
- if (false == dc_construct(dc, init_params)) {
- dc_destruct(dc);
- goto construct_fail;
- }
+ if (!dc_construct(dc, init_params))
+ goto destruct_dc;
full_pipe_count = dc->res_pool->pipe_count;
if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
@@ -1007,10 +1003,9 @@ struct dc *dc_create(const struct dc_init_data *init_params)
return dc;
-construct_fail:
+destruct_dc:
+ dc_destruct(dc);
kfree(dc);
-
-alloc_fail:
return NULL;
}
@@ -1493,7 +1488,7 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
enum dc_status result = DC_ERROR_UNEXPECTED;
int i;
- if (false == context_changed(dc, context))
+ if (!context_changed(dc, context))
return DC_OK;
DC_LOG_DC("%s: %d streams\n",
@@ -1540,7 +1535,7 @@ bool dc_acquire_release_mpc_3dlut(
if (found_pipe_idx) {
if (acquire && pool->funcs->acquire_post_bldn_3dlut)
ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
- else if (acquire == false && pool->funcs->release_post_bldn_3dlut)
+ else if (!acquire && pool->funcs->release_post_bldn_3dlut)
ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
}
}
@@ -2016,7 +2011,7 @@ static enum surface_update_type check_update_surfaces_for_stream(
return overall_type;
}
-/**
+/*
* dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
*
* See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
@@ -2270,6 +2265,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->dither_option)
stream->dither_option = *update->dither_option;
+
+ if (update->pending_test_pattern)
+ stream->test_pattern = *update->pending_test_pattern;
/* update current stream with writeback info */
if (update->wb_update) {
int i;
@@ -2366,6 +2364,15 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
}
+ if (stream_update->pending_test_pattern) {
+ dc_link_dp_set_test_pattern(stream->link,
+ stream->test_pattern.type,
+ stream->test_pattern.color_space,
+ stream->test_pattern.p_link_settings,
+ stream->test_pattern.p_custom_pattern,
+ stream->test_pattern.cust_pattern_size);
+ }
+
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
@@ -2819,7 +2826,7 @@ enum dc_irq_source dc_interrupt_to_irq_source(
return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}
-/**
+/*
* dc_interrupt_set() - Enable/disable an AMD hw interrupt source
*/
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
@@ -2953,7 +2960,7 @@ static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink
return true;
}
-/**
+/*
* dc_link_add_remote_sink() - Create a sink and attach it to an existing link
*
* EDID length is in bytes
@@ -3016,7 +3023,7 @@ fail_add_sink:
return NULL;
}
-/**
+/*
* dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
*
* Note that this just removes the struct dc_sink - it doesn't
@@ -3143,9 +3150,11 @@ void dc_lock_memory_clock_frequency(struct dc *dc)
core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
}
-bool dc_is_plane_eligible_for_idle_optimizaitons(struct dc *dc,
- struct dc_plane_state *plane)
+bool dc_is_plane_eligible_for_idle_optimizaitons(struct dc *dc, struct dc_plane_state *plane)
{
+ if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane))
+ return true;
+
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 9e1071b2181f..9885ef21cc05 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -203,9 +203,21 @@ static bool program_hpd_filter(const struct dc_link *link)
return result;
}
+bool dc_link_wait_for_t12(struct dc_link *link)
+{
+ if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) {
+ link->dc->hwss.edp_wait_for_T12(link);
+
+ return true;
+ }
+
+ return false;
+}
+
/**
* dc_link_detect_sink() - Determine if there is a sink connected
*
+ * @link: pointer to the dc link
* @type: Returned connection type
* Does not detect downstream devices, such as MST sinks
* or display connected through active dongles
@@ -342,7 +354,7 @@ static enum signal_type get_basic_signal_type(struct graphics_object_id encoder,
return SIGNAL_TYPE_NONE;
}
-/**
+/*
* dc_link_is_dp_sink_present() - Check if there is a native DP
* or passive DP-HDMI dongle connected
*/
@@ -596,8 +608,6 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
dc_process_hdcp_msg(signal, link, &msg22);
if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- enum hdcp_message_status status = HDCP_MESSAGE_UNSUPPORTED;
-
msg14.data = &link->hdcp_caps.bcaps.raw;
msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
@@ -605,7 +615,7 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
msg14.link = HDCP_LINK_PRIMARY;
msg14.max_retries = 5;
- status = dc_process_hdcp_msg(signal, link, &msg14);
+ dc_process_hdcp_msg(signal, link, &msg14);
}
}
@@ -830,7 +840,7 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
return false;
}
-/**
+/*
* dc_link_detect() - Detect if a sink is attached to a given link
*
* link->local_sink is created or destroyed as needed.
@@ -1065,9 +1075,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
break;
}
- if (link->local_sink->edid_caps.panel_patch.disable_fec)
- link->ctx->dc->debug.disable_fec = true;
-
// Check if edid is the same
if ((prev_sink) &&
(edid_status == EDID_THE_SAME || edid_status == EDID_OK))
@@ -1366,13 +1373,17 @@ static bool dc_link_construct(struct dc_link *link,
struct dc_context *dc_ctx = init_params->ctx;
struct encoder_init_data enc_init_data = { 0 };
struct panel_cntl_init_data panel_cntl_init_data = { 0 };
- struct integrated_info info = {{{ 0 }}};
+ struct integrated_info *info;
struct dc_bios *bios = init_params->dc->ctx->dc_bios;
const struct dc_vbios_funcs *bp_funcs = bios->funcs;
struct bp_disp_connector_caps_info disp_connect_caps_info = { 0 };
DC_LOGGER_INIT(dc_ctx->logger);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ goto create_fail;
+
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
@@ -1534,12 +1545,12 @@ static bool dc_link_construct(struct dc_link *link,
}
if (bios->integrated_info)
- info = *bios->integrated_info;
+ memcpy(info, bios->integrated_info, sizeof(*info));
/* Look for channel mapping corresponding to connector and device tag */
for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
struct external_display_path *path =
- &info.ext_disp_conn_info.path[i];
+ &info->ext_disp_conn_info.path[i];
if (path->device_connector_id.enum_id == link->link_id.enum_id &&
path->device_connector_id.id == link->link_id.id &&
@@ -1586,6 +1597,8 @@ create_fail:
link->hpd_gpio = NULL;
}
+ kfree(info);
+
return false;
}
@@ -2487,9 +2500,14 @@ enum dc_status dc_link_validate_mode_timing(
static struct abm *get_abm_from_stream_res(const struct dc_link *link)
{
int i;
- struct dc *dc = link->ctx->dc;
+ struct dc *dc = NULL;
struct abm *abm = NULL;
+ if (!link || !link->ctx)
+ return NULL;
+
+ dc = link->ctx->dc;
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
struct dc_stream_state *stream = pipe_ctx.stream;
@@ -3391,10 +3409,7 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
}
/**
- *****************************************************************************
- * Function: dc_link_enable_hpd_filter
- *
- * @brief
+ * dc_link_enable_hpd_filter:
* If enable is true, programs HPD filter on associated HPD line using
* delay_on_disconnect/delay_on_connect values dependent on
* link->connector_signal
@@ -3402,9 +3417,8 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
* If enable is false, programs HPD filter on associated HPD line with no
* delays on connect or disconnect
*
- * @param [in] link: pointer to the dc link
- * @param [in] enable: boolean specifying whether to enable hbd
- *****************************************************************************
+ * @link: pointer to the dc link
+ * @enable: boolean specifying whether to enable hpd
*/
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
{
@@ -3630,7 +3644,7 @@ uint32_t dc_link_bandwidth_kbps(
link_bw_kbps *= 8; /* 8 bits per byte*/
link_bw_kbps *= link_setting->lane_count;
- if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec) {
+ if (dc_link_should_enable_fec(link)) {
/* Account for FEC overhead.
* We have to do it based on caps,
* and not based on FEC being set ready,
@@ -3682,3 +3696,18 @@ bool dc_link_is_fec_supported(const struct dc_link *link)
!IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
}
+bool dc_link_should_enable_fec(const struct dc_link *link)
+{
+ bool is_fec_disable = false;
+ bool ret = false;
+
+ if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
+ link->local_sink &&
+ link->local_sink->edid_caps.panel_patch.disable_fec)
+ is_fec_disable = true;
+
+ if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable)
+ ret = true;
+
+ return ret;
+}
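dc_link_should_enable_fec() now also honours the per-panel disable_fec EDID quirk (previously applied globally via debug.disable_fec in dc_link_detect_helper), so the FEC parity overhead in dc_link_bandwidth_kbps() is only subtracted when FEC will actually be enabled. For reference, a standalone sketch of the base bandwidth arithmetic in that function, using the representation noted elsewhere in this patch (link rate stored as a multiplier of 0.27 Gbps; one payload byte per symbol after 8b/10b):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical link: HBR2 (0.27 Gbps multiplier 20) at 4 lanes */
	uint32_t link_rate_multiplier = 20;
	uint32_t lane_count = 4;

	uint32_t link_bw_kbps = link_rate_multiplier * 27000; /* symbol clock in kHz */
	link_bw_kbps *= 8;          /* 8 payload bits per symbol */
	link_bw_kbps *= lane_count;

	/* 17280000 kbps == 17.28 Gbps payload bandwidth for HBR2 x4;
	 * with FEC enabled a few percent of this is lost to parity */
	printf("%u kbps\n", link_bw_kbps);
	return 0;
}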
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 2fc12239b22c..4149b8771462 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2399,6 +2399,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
initial_link_setting;
uint32_t link_bw;
+ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ return false;
+
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
@@ -3045,14 +3048,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link)
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_disable_stream(pipe_ctx);
}
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link)
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_enable_stream(link->dc->current_state, pipe_ctx);
}
@@ -3707,7 +3710,7 @@ bool detect_dp_sink_caps(struct dc_link *link)
/* TODO save sink caps in link->sink */
}
-enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
+static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
{
enum dc_link_rate link_rate;
// LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation.
@@ -3992,7 +3995,7 @@ bool dc_link_dp_set_test_pattern(
unsigned int cust_pattern_size)
{
struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
- struct pipe_ctx *pipe_ctx = &pipes[0];
+ struct pipe_ctx *pipe_ctx = NULL;
unsigned int lane;
unsigned int i;
unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
@@ -4002,12 +4005,18 @@ bool dc_link_dp_set_test_pattern(
memset(&training_pattern, 0, sizeof(training_pattern));
for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream == NULL)
+ continue;
+
if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
pipe_ctx = &pipes[i];
break;
}
}
+ if (pipe_ctx == NULL)
+ return false;
+
/* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
if (link->test_pattern_enabled && test_pattern ==
DP_TEST_PATTERN_VIDEO_MODE) {
@@ -4339,7 +4348,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready)
struct link_encoder *link_enc = link->link_enc;
uint8_t fec_config = 0;
- if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
+ if (!dc_link_should_enable_fec(link))
return;
if (link_enc->funcs->fec_set_ready &&
@@ -4374,7 +4383,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
{
struct link_encoder *link_enc = link->link_enc;
- if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
+ if (!dc_link_should_enable_fec(link))
return;
if (link_enc->funcs->fec_set_enable &&
@@ -4400,25 +4409,40 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
void dpcd_set_source_specific_data(struct dc_link *link)
{
if (!link->dc->vendor_signature.is_valid) {
- enum dc_status result_write_min_hblank = DC_NOT_SUPPORTED;
- struct dpcd_amd_signature amd_signature;
- amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
- amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
- amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
- amd_signature.device_id_byte1 =
+ enum dc_status __maybe_unused result_write_min_hblank = DC_NOT_SUPPORTED;
+ struct dpcd_amd_signature amd_signature = {0};
+ struct dpcd_amd_device_id amd_device_id = {0};
+
+ amd_device_id.device_id_byte1 =
(uint8_t)(link->ctx->asic_id.chip_id);
- amd_signature.device_id_byte2 =
+ amd_device_id.device_id_byte2 =
(uint8_t)(link->ctx->asic_id.chip_id >> 8);
- memset(&amd_signature.zero, 0, 4);
- amd_signature.dce_version =
+ amd_device_id.dce_version =
(uint8_t)(link->ctx->dce_version);
- amd_signature.dal_version_byte1 = 0x0; // needed? where to get?
- amd_signature.dal_version_byte2 = 0x0; // needed? where to get?
+ amd_device_id.dal_version_byte1 = 0x0; // needed? where to get?
+ amd_device_id.dal_version_byte2 = 0x0; // needed? where to get?
- core_link_write_dpcd(link, DP_SOURCE_OUI,
+ core_link_read_dpcd(link, DP_SOURCE_OUI,
(uint8_t *)(&amd_signature),
sizeof(amd_signature));
+ if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) &&
+ (amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) &&
+ (amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) {
+
+ amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
+
+ core_link_write_dpcd(link, DP_SOURCE_OUI,
+ (uint8_t *)(&amd_signature),
+ sizeof(amd_signature));
+ }
+
+ core_link_write_dpcd(link, DP_SOURCE_OUI+0x03,
+ (uint8_t *)(&amd_device_id),
+ sizeof(amd_device_id));
+
if (link->ctx->dce_version >= DCN_VERSION_2_0 &&
link->dc->caps.min_horizontal_blanking_period != 0) {
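A minimal model of the reworked source-OUI programming above (the AUX access, DPCD layout, and chip-id bytes are faked here): read the signature area first, rewrite the three AMD OUI bytes only if they are not already present, then always refresh the device-id block that now lives in its own struct at DP_SOURCE_OUI + 0x03.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t fake_dpcd[16];   /* stand-in for the sink's DPCD registers */

static void dpcd_read(unsigned int addr, uint8_t *buf, size_t len)
{
	memcpy(buf, &fake_dpcd[addr], len);
}

static void dpcd_write(unsigned int addr, const uint8_t *buf, size_t len)
{
	memcpy(&fake_dpcd[addr], buf, len);
	printf("write %zu bytes at +0x%02x\n", len, addr);
}

static void set_source_specific_data(void)
{
	uint8_t signature[3] = { 0 };
	const uint8_t amd_oui[3] = { 0x00, 0x00, 0x1A };
	uint8_t device_id[4] = { 0x34, 0x12, 0x00, 0x00 }; /* made-up chip id */

	dpcd_read(0x00, signature, sizeof(signature));
	if (memcmp(signature, amd_oui, sizeof(amd_oui)) != 0)
		dpcd_write(0x00, amd_oui, sizeof(amd_oui));   /* program OUI once */

	dpcd_write(0x03, device_id, sizeof(device_id));       /* always refresh  */
}

int main(void)
{
	set_source_specific_data();  /* first call programs the OUI     */
	set_source_specific_data();  /* second call skips the OUI write */
	return 0;
}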
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 07c22556480b..68b65a090d17 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1117,7 +1117,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
* We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the
* original h_border_left value in its calculation.
*/
-int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
+static int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
{
int store_h_border_left = pipe_ctx->stream->timing.h_border_left;
@@ -1128,8 +1128,8 @@ int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
return store_h_border_left;
}
-void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
- int store_h_border_left)
+static void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
+ int store_h_border_left)
{
pipe_ctx->stream->dst.x -= store_h_border_left;
pipe_ctx->stream->timing.h_border_left = store_h_border_left;
@@ -1697,7 +1697,7 @@ static bool are_stream_backends_same(
return true;
}
-/**
+/*
* dc_is_stream_unchanged() - Compare two stream states for equivalence.
*
* Checks if there a difference between the two states
@@ -1718,7 +1718,7 @@ bool dc_is_stream_unchanged(
return true;
}
-/**
+/*
* dc_is_stream_scaling_unchanged() - Compare scaling rectangles of two streams.
*/
bool dc_is_stream_scaling_unchanged(
@@ -1833,7 +1833,7 @@ static struct audio *find_first_free_audio(
return 0;
}
-/**
+/*
* dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
*/
enum dc_status dc_add_stream_to_ctx(
@@ -1860,7 +1860,7 @@ enum dc_status dc_add_stream_to_ctx(
return res;
}
-/**
+/*
* dc_remove_stream_from_ctx() - Remove a stream from a dc_state.
*/
enum dc_status dc_remove_stream_from_ctx(
@@ -2075,6 +2075,20 @@ static int acquire_resource_from_hw_enabled_state(
return -1;
}
+static void mark_seamless_boot_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+
+ /* TODO: Check Linux */
+ if (dc->config.allow_seamless_boot_optimization &&
+ !dcb->funcs->is_accelerated_mode(dcb)) {
+ if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
+ stream->apply_seamless_boot_optimization = true;
+ }
+}
+
enum dc_status resource_map_pool_resources(
const struct dc *dc,
struct dc_state *context,
@@ -2085,22 +2099,20 @@ enum dc_status resource_map_pool_resources(
struct dc_context *dc_ctx = dc->ctx;
struct pipe_ctx *pipe_ctx = NULL;
int pipe_idx = -1;
- struct dc_bios *dcb = dc->ctx->dc_bios;
calculate_phy_pix_clks(stream);
- /* TODO: Check Linux */
- if (dc->config.allow_seamless_boot_optimization &&
- !dcb->funcs->is_accelerated_mode(dcb)) {
- if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
- stream->apply_seamless_boot_optimization = true;
- }
+ mark_seamless_boot_stream(dc, stream);
- if (stream->apply_seamless_boot_optimization)
+ if (stream->apply_seamless_boot_optimization) {
pipe_idx = acquire_resource_from_hw_enabled_state(
&context->res_ctx,
pool,
stream);
+ if (pipe_idx < 0)
+ /* hw resource was assigned to other stream */
+ stream->apply_seamless_boot_optimization = false;
+ }
if (pipe_idx < 0)
/* acquire new resources */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index c103f858375d..25fa712a7847 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -244,7 +244,7 @@ struct dc_stream_status *dc_stream_get_status(
}
#ifndef TRIM_FSFT
-/**
+/*
* dc_optimize_timing_for_fsft() - dc to optimize timing
*/
bool dc_optimize_timing_for_fsft(
@@ -260,8 +260,7 @@ bool dc_optimize_timing_for_fsft(
}
#endif
-
-/**
+/*
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
*/
bool dc_stream_set_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 3d7d27435f15..e6b9c6a71841 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -115,7 +115,7 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
return plane_state;
}
-/**
+/*
*****************************************************************************
* Function: dc_plane_get_status
*
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 3aedadb34548..f3ba02cc85d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -42,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.116"
+#define DC_VER "3.2.118"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -171,6 +171,9 @@ struct dc_caps {
bool dmcub_support;
uint32_t num_of_internal_disp;
enum dp_protocol_version max_dp_protocol_version;
+ unsigned int mall_size_per_mem_channel;
+ unsigned int mall_size_total;
+ unsigned int cursor_cache_size;
struct dc_plane_cap planes[MAX_PLANES];
struct dc_color_caps color;
};
@@ -481,7 +484,6 @@ struct dc_debug_options {
bool performance_trace;
bool az_endpoint_mute_only;
bool always_use_regamma;
- bool p010_mpo_support;
bool recovery_enabled;
bool avoid_vbios_exec_table;
bool scl_reset_length10;
@@ -499,6 +501,7 @@ struct dc_debug_options {
bool dmcub_emulation;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool disable_idle_power_optimizations;
+ unsigned int mall_size_override;
#endif
bool dmub_command_table; /* for testing only */
struct dc_bw_validation_profile bw_val_profile;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 80a2191a3115..cc6fb838420e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -451,6 +451,9 @@ struct dpcd_amd_signature {
uint8_t AMD_IEEE_TxSignature_byte1;
uint8_t AMD_IEEE_TxSignature_byte2;
uint8_t AMD_IEEE_TxSignature_byte3;
+};
+
+struct dpcd_amd_device_id {
uint8_t device_id_byte1;
uint8_t device_id_byte2;
uint8_t zero[4];
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 57edb25fc381..a612ba6dc389 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -34,6 +34,7 @@
#include "dc.h"
#include "dc_dmub_srv.h"
+#include "reg_helper.h"
static inline void submit_dmub_read_modify_write(
struct dc_reg_helper_state *offload,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 701aa7178a89..b41e6367b15e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -71,6 +71,7 @@ struct dc_plane_address {
union {
struct{
PHYSICAL_ADDRESS_LOC addr;
+ PHYSICAL_ADDRESS_LOC cursor_cache_addr;
PHYSICAL_ADDRESS_LOC meta_addr;
union large_integer dcc_const_color;
} grph;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 6d9a60c9dcc0..d5d8f0ad9233 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -259,6 +259,13 @@ enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link);
bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);
+/*
+ * On eDP links this function call will stall until T12 has elapsed.
+ * If the panel is not in power off state, this function will return
+ * immediately.
+ */
+bool dc_link_wait_for_t12(struct dc_link *link);
+
enum dc_status read_hpd_rx_irq_data(
struct dc_link *link,
union hpd_irq_data *irq_data);
@@ -369,5 +376,6 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing);
bool dc_link_is_fec_supported(const struct dc_link *link);
+bool dc_link_should_enable_fec(const struct dc_link *link);
#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index b7910976b81a..80b67b860091 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -130,6 +130,14 @@ union stream_update_flags {
uint32_t raw;
};
+struct test_pattern {
+ enum dp_test_pattern type;
+ enum dp_test_pattern_color_space color_space;
+ struct link_training_settings const *p_link_settings;
+ unsigned char const *p_custom_pattern;
+ unsigned int cust_pattern_size;
+};
+
struct dc_stream_state {
// sink is deprecated, new code should not reference
// this pointer
@@ -227,6 +235,8 @@ struct dc_stream_state {
uint32_t stream_id;
bool is_dsc_enabled;
+
+ struct test_pattern test_pattern;
union stream_update_flags update_flags;
};
@@ -261,6 +271,7 @@ struct dc_stream_update {
struct dc_dsc_config *dsc_config;
struct dc_transfer_func *func_shaper;
struct dc_3dlut *lut3d_func;
+ struct test_pattern *pending_test_pattern;
};
bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 2a2a0fdb9253..7866cf2a668f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -868,7 +868,7 @@ void dce_aud_wall_dto_setup(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_aud_wall_dto_setup(
+static void dce60_aud_wall_dto_setup(
struct audio *audio,
enum signal_type signal,
const struct audio_crtc_info *crtc_info,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index cda5fd0464bc..d51b5fe91287 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -388,12 +388,6 @@ static enum aux_channel_operation_result get_channel_status(
}
}
-enum i2caux_engine_type get_engine_type(
- const struct dce_aux *engine)
-{
- return I2CAUX_ENGINE_TYPE_AUX;
-}
-
static bool acquire(
struct dce_aux *engine,
struct ddc *ddc)
@@ -582,7 +576,7 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
*operation_result = get_channel_status(aux_engine, &returned_bytes);
if (*operation_result == AUX_CHANNEL_OPERATION_SUCCEEDED) {
- int bytes_replied = 0;
+ int __maybe_unused bytes_replied = 0;
bytes_replied = read_channel_reply(aux_engine, payload->length,
payload->data, payload->reply,
&status);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index 382465862f29..277484cf853e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -124,7 +124,6 @@ struct dce110_aux_registers {
AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
- AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index fb733f573715..10938a8c9500 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -113,20 +113,19 @@ static const struct spread_spectrum_data *get_ss_data_entry(
}
/**
- * Function: calculate_fb_and_fractional_fb_divider
+ * calculate_fb_and_fractional_fb_divider - Calculates feedback and fractional
+ * feedback dividers values
*
- * * DESCRIPTION: Calculates feedback and fractional feedback dividers values
+ * @calc_pll_cs: Pointer to clock source information
+ * @target_pix_clk_100hz: Desired frequency in 100 Hz
+ * @ref_divider: Reference divider (already known)
+ * @post_divider: Post Divider (already known)
+ * @feedback_divider_param: Pointer where to store
+ * calculated feedback divider value
+ * @fract_feedback_divider_param: Pointer where to store
+ * calculated fract feedback divider value
*
- *PARAMETERS:
- * targetPixelClock Desired frequency in 100 Hz
- * ref_divider Reference divider (already known)
- * postDivider Post Divider (already known)
- * feedback_divider_param Pointer where to store
- * calculated feedback divider value
- * fract_feedback_divider_param Pointer where to store
- * calculated fract feedback divider value
- *
- *RETURNS:
+ * return:
* It fills the locations pointed by feedback_divider_param
* and fract_feedback_divider_param
* It returns - true if feedback divider not 0
@@ -175,22 +174,22 @@ static bool calculate_fb_and_fractional_fb_divider(
}
/**
-*calc_fb_divider_checking_tolerance
-*
-*DESCRIPTION: Calculates Feedback and Fractional Feedback divider values
-* for passed Reference and Post divider, checking for tolerance.
-*PARAMETERS:
-* pll_settings Pointer to structure
-* ref_divider Reference divider (already known)
-* postDivider Post Divider (already known)
-* tolerance Tolerance for Calculated Pixel Clock to be within
-*
-*RETURNS:
-* It fills the PLLSettings structure with PLL Dividers values
-* if calculated values are within required tolerance
-* It returns - true if error is within tolerance
-* - false if error is not within tolerance
-*/
+ * calc_fb_divider_checking_tolerance - Calculates Feedback and
+ * Fractional Feedback divider values
+ * for passed Reference and Post divider,
+ * checking for tolerance.
+ * @calc_pll_cs: Pointer to clock source information
+ * @pll_settings: Pointer to PLL settings
+ * @ref_divider: Reference divider (already known)
+ * @post_divider: Post Divider (already known)
+ * @tolerance: Tolerance for Calculated Pixel Clock to be within
+ *
+ * return:
+ * It fills the PLLSettings structure with PLL Dividers values
+ * if calculated values are within required tolerance
+ * It returns - true if error is within tolerance
+ * - false if error is not within tolerance
+ */
static bool calc_fb_divider_checking_tolerance(
struct calc_pll_clock_source *calc_pll_cs,
struct pll_settings *pll_settings,
@@ -460,7 +459,7 @@ static bool pll_adjust_pix_clk(
return false;
}
-/**
+/*
* Calculate PLL Dividers for given Clock Value.
* First will call VBIOS Adjust Exec table to check if requested Pixel clock
* will be Adjusted based on usage.
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index f3ed8b619caf..30264fc151a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -65,13 +65,17 @@
//Register access policy version
#define mmMP0_SMN_C2PMSG_91 0x1609B
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static const uint32_t abm_gain_stepsize = 0x0060;
+#endif
+
static bool dce_dmcu_init(struct dmcu *dmcu)
{
// Do nothing
return true;
}
-bool dce_dmcu_load_iram(struct dmcu *dmcu,
+static bool dce_dmcu_load_iram(struct dmcu *dmcu,
unsigned int start_offset,
const char *src,
unsigned int bytes)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index 93e7f34d4775..cefb7f5bf42c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -317,6 +317,4 @@ struct dmcu *dcn21_dmcu_create(
void dce_dmcu_destroy(struct dmcu **dmcu);
-static const uint32_t abm_gain_stepsize = 0x0060;
-
#endif /* _DCE_ABM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 7fbd92fbc63a..a524f471e0d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -435,7 +435,7 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
return dce_i2c_hw;
}
-enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
+static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
struct dce_i2c_hw *dce_i2c_hw,
uint32_t timeout,
enum i2c_channel_operation_result expected_result)
@@ -502,7 +502,7 @@ static uint32_t get_transaction_timeout_hw(
return period_timeout * num_of_clock_stretches;
}
-bool dce_i2c_hw_engine_submit_payload(
+static bool dce_i2c_hw_engine_submit_payload(
struct dce_i2c_hw *dce_i2c_hw,
struct i2c_payload *payload,
bool middle_of_transaction,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index 87d8428df6c4..6846afd83701 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -339,7 +339,7 @@ static bool start_sync_sw(
return false;
}
-void dce_i2c_sw_engine_set_speed(
+static void dce_i2c_sw_engine_set_speed(
struct dce_i2c_sw *engine,
uint32_t speed)
{
@@ -353,7 +353,7 @@ void dce_i2c_sw_engine_set_speed(
engine->clock_delay = 12;
}
-bool dce_i2c_sw_engine_acquire_engine(
+static bool dce_i2c_sw_engine_acquire_engine(
struct dce_i2c_sw *engine,
struct ddc *ddc)
{
@@ -397,7 +397,7 @@ bool dce_i2c_engine_acquire_sw(
-void dce_i2c_sw_engine_submit_channel_request(
+static void dce_i2c_sw_engine_submit_channel_request(
struct dce_i2c_sw *engine,
struct i2c_request_transaction_data *req)
{
@@ -440,7 +440,8 @@ void dce_i2c_sw_engine_submit_channel_request(
I2C_CHANNEL_OPERATION_SUCCEEDED :
I2C_CHANNEL_OPERATION_FAILED;
}
-bool dce_i2c_sw_engine_submit_payload(
+
+static bool dce_i2c_sw_engine_submit_payload(
struct dce_i2c_sw *engine,
struct i2c_payload *payload,
bool middle_of_transaction)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 210466b2d863..1e77ffee71b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -1197,7 +1197,7 @@ void dce110_link_encoder_enable_dp_mst_output(
#if defined(CONFIG_DRM_AMD_DC_SI)
/* enables DP PHY output */
-void dce60_link_encoder_enable_dp_output(
+static void dce60_link_encoder_enable_dp_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
@@ -1236,7 +1236,7 @@ void dce60_link_encoder_enable_dp_output(
}
/* enables DP PHY output in MST mode */
-void dce60_link_encoder_enable_dp_mst_output(
+static void dce60_link_encoder_enable_dp_mst_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
@@ -1426,7 +1426,7 @@ void dce110_link_encoder_dp_set_phy_pattern(
#if defined(CONFIG_DRM_AMD_DC_SI)
/* set DP PHY test and training patterns */
-void dce60_link_encoder_dp_set_phy_pattern(
+static void dce60_link_encoder_dp_set_phy_pattern(
struct link_encoder *enc,
const struct encoder_set_dp_phy_pattern_param *param)
{
@@ -1503,7 +1503,6 @@ void dce110_link_encoder_update_mst_stream_allocation_table(
const struct link_mst_stream_allocation_table *table)
{
struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
- uint32_t value0 = 0;
uint32_t value1 = 0;
uint32_t value2 = 0;
uint32_t slots = 0;
@@ -1604,7 +1603,7 @@ void dce110_link_encoder_update_mst_stream_allocation_table(
do {
udelay(10);
- value0 = REG_READ(DP_MSE_SAT_UPDATE);
+ REG_READ(DP_MSE_SAT_UPDATE);
REG_GET(DP_MSE_SAT_UPDATE,
DP_MSE_SAT_UPDATE, &value1);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
index e459ae65aaf7..4600231da6cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
@@ -97,7 +97,7 @@ enum {
-/**
+/*
* set_truncation
* 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
* 2) enable truncation
@@ -142,7 +142,7 @@ static void set_truncation(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-/**
+/*
* dce60_set_truncation
* 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
* 2) enable truncation
@@ -183,7 +183,7 @@ static void dce60_set_truncation(
}
#endif
-/**
+/*
* set_spatial_dither
* 1) set spatial dithering mode: pattern of seed
* 2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp
@@ -291,7 +291,7 @@ static void set_spatial_dither(
FMT_SPATIAL_DITHER_EN, 1);
}
-/**
+/*
* SetTemporalDither (Frame Modulation)
* 1) set temporal dither depth
* 2) select pattern: from hard-coded pattern or programmable pattern
@@ -355,7 +355,7 @@ static void set_temporal_dither(
FMT_TEMPORAL_DITHER_EN, 1);
}
-/**
+/*
* Set Clamping
* 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
* 1 for 8 bpc
@@ -415,7 +415,7 @@ void dce110_opp_set_clamping(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-/**
+/*
* Set Clamping for DCE6 parts
* 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
* 1 for 8 bpc
@@ -424,7 +424,7 @@ void dce110_opp_set_clamping(
* 7 for programable
* 2) Enable clamp if Limited range requested
*/
-void dce60_opp_set_clamping(
+static void dce60_opp_set_clamping(
struct dce110_opp *opp110,
const struct clamping_and_pixel_encoding_params *params)
{
@@ -465,7 +465,7 @@ void dce60_opp_set_clamping(
}
#endif
-/**
+/*
* set_pixel_encoding
*
* Set Pixel Encoding
@@ -501,7 +501,7 @@ static void set_pixel_encoding(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-/**
+/*
* dce60_set_pixel_encoding
* DCE6 has no FMT_SUBSAMPLING_{MODE,ORDER} bits in FMT_CONTROL reg
* Set Pixel Encoding
@@ -545,7 +545,7 @@ void dce110_opp_program_bit_depth_reduction(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_opp_program_bit_depth_reduction(
+static void dce60_opp_program_bit_depth_reduction(
struct output_pixel_processor *opp,
const struct bit_depth_reduction_params *params)
{
@@ -568,7 +568,7 @@ void dce110_opp_program_clamping_and_pixel_encoding(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_opp_program_clamping_and_pixel_encoding(
+static void dce60_opp_program_clamping_and_pixel_encoding(
struct output_pixel_processor *opp,
const struct clamping_and_pixel_encoding_params *params)
{
@@ -678,7 +678,7 @@ void dce110_opp_program_fmt(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_opp_program_fmt(
+static void dce60_opp_program_fmt(
struct output_pixel_processor *opp,
struct bit_depth_reduction_params *fmt_bit_depth,
struct clamping_and_pixel_encoding_params *clamping)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
index 4d484ef60f35..bf1ffc3629c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
@@ -111,7 +111,6 @@ enum dce110_opp_reg_type {
OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
- OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
@@ -219,7 +218,6 @@ enum dce110_opp_reg_type {
OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
- OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
index 761fdfc1f5bd..e92339235863 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -50,16 +50,16 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c
{
uint64_t current_backlight;
uint32_t round_result;
- uint32_t pwm_period_cntl, bl_period, bl_int_count;
- uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
+ uint32_t bl_period, bl_int_count;
+ uint32_t bl_pwm, fractional_duty_cycle_en;
uint32_t bl_period_mask, bl_pwm_mask;
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
- pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
+ REG_READ(BL_PWM_PERIOD_CNTL);
REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
- bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
+ REG_READ(BL_PWM_CNTL);
REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index ada57f745fd7..265eaef30a51 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -67,7 +67,6 @@ static void dce110_update_generic_info_packet(
uint32_t packet_index,
const struct dc_info_packet *info_packet)
{
- uint32_t regval;
/* TODOFPGA Figure out a proper number for max_retries polling for lock
* use 50 for now.
*/
@@ -99,7 +98,7 @@ static void dce110_update_generic_info_packet(
}
/* choose which generic packet to use */
{
- regval = REG_READ(AFMT_VBI_PACKET_CONTROL);
+ REG_READ(AFMT_VBI_PACKET_CONTROL);
REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
AFMT_GENERIC_INDEX, packet_index);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 130a0a0c8332..abbaa6b0b2db 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -493,7 +493,6 @@ static void dce60_transform_set_scaler(
{
struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
bool is_scaling_required;
- bool filter_updated = false;
const uint16_t *coeffs_v, *coeffs_h;
/*Use whole line buffer memory always*/
@@ -558,7 +557,6 @@ static void dce60_transform_set_scaler(
xfm_dce->filter_v = coeffs_v;
xfm_dce->filter_h = coeffs_h;
- filter_updated = true;
}
}
@@ -1037,34 +1035,23 @@ static void dce60_transform_set_pixel_storage_depth(
const struct bit_depth_reduction_params *bit_depth_params)
{
struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
- int pixel_depth, expan_mode;
enum dc_color_depth color_depth;
switch (depth) {
case LB_PIXEL_DEPTH_18BPP:
color_depth = COLOR_DEPTH_666;
- pixel_depth = 2;
- expan_mode = 1;
break;
case LB_PIXEL_DEPTH_24BPP:
color_depth = COLOR_DEPTH_888;
- pixel_depth = 1;
- expan_mode = 1;
break;
case LB_PIXEL_DEPTH_30BPP:
color_depth = COLOR_DEPTH_101010;
- pixel_depth = 0;
- expan_mode = 1;
break;
case LB_PIXEL_DEPTH_36BPP:
color_depth = COLOR_DEPTH_121212;
- pixel_depth = 3;
- expan_mode = 0;
break;
default:
color_depth = COLOR_DEPTH_101010;
- pixel_depth = 0;
- expan_mode = 1;
BREAK_TO_DEBUGGER();
break;
}
@@ -1113,7 +1100,7 @@ static void program_gamut_remap(
}
-/**
+/*
*****************************************************************************
* Function: dal_transform_wide_gamut_set_gamut_remap
*
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 17e84f34ceba..4228caa74119 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -31,7 +31,7 @@
#define MAX_PIPES 6
-/**
+/*
* Convert dmcub psr state to dmcu psr state.
*/
static enum dc_psr_state convert_psr_state(uint32_t raw_state)
@@ -74,7 +74,7 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
return state;
}
-/**
+/*
* Get PSR state from firmware.
*/
static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
@@ -90,7 +90,7 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
*state = convert_psr_state(raw_state);
}
-/**
+/*
* Set PSR version.
*/
static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *stream)
@@ -121,7 +121,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
return true;
}
-/**
+/*
* Enable/Disable PSR.
*/
static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
@@ -170,7 +170,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
}
}
-/**
+/*
* Set PSR level.
*/
static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
@@ -194,7 +194,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
-/**
+/*
* Setup PSR by programming phy registers and sending psr hw context values to firmware.
*/
static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
@@ -277,7 +277,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
return true;
}
-/**
+/*
* Send command to PSR to force static ENTER and ignore all state changes until exit
*/
static void dmub_psr_force_static(struct dmub_psr *dmub)
@@ -294,7 +294,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub)
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
-/**
+/*
* Get PSR residency from firmware.
*/
static void dmub_psr_get_residency(struct dmub_psr *dmub, uint32_t *residency)
@@ -316,7 +316,7 @@ static const struct dmub_psr_funcs psr_funcs = {
.psr_get_residency = dmub_psr_get_residency,
};
-/**
+/*
* Construct PSR object.
*/
static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx)
@@ -325,7 +325,7 @@ static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx)
psr->funcs = &psr_funcs;
}
-/**
+/*
* Allocate and initialize PSR object.
*/
struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
@@ -342,7 +342,7 @@ struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
return psr;
}
-/**
+/*
* Deallocate PSR object.
*/
void dmub_psr_destroy(struct dmub_psr **dmub)
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
index a822d4e2a169..ff20c47f559e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
@@ -23,6 +23,8 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce100/dce100_resource.o = $(call cc-disable-warning, override-init)
+
DCE100 = dce100_resource.o dce100_hw_sequencer.o
AMD_DAL_DCE100 = $(addprefix $(AMDDALPATH)/dc/dce100/,$(DCE100))
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 8ab9d6c79808..635ef0e7c782 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -58,6 +58,8 @@
#include "dce/dce_abm.h"
#include "dce/dce_i2c.h"
+#include "dce100_resource.h"
+
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_8_2_d.h"
#include "gmc/gmc_8_2_sh_mask.h"
@@ -385,7 +387,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
- .fp16 = false
+ .fp16 = true
},
.max_upscale_factor = {
@@ -611,7 +613,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS3_CAPABLE = true
};
-struct link_encoder *dce100_link_encoder_create(
+static struct link_encoder *dce100_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dce110_link_encoder *enc110 =
@@ -650,7 +652,7 @@ static struct panel_cntl *dce100_panel_cntl_create(const struct panel_cntl_init_
return &panel_cntl->base;
}
-struct output_pixel_processor *dce100_opp_create(
+static struct output_pixel_processor *dce100_opp_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -665,7 +667,7 @@ struct output_pixel_processor *dce100_opp_create(
return &opp->base;
}
-struct dce_aux *dce100_aux_engine_create(
+static struct dce_aux *dce100_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -703,7 +705,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
-struct dce_i2c_hw *dce100_i2c_hw_create(
+static struct dce_i2c_hw *dce100_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -718,7 +720,7 @@ struct dce_i2c_hw *dce100_i2c_hw_create(
return dce_i2c_hw;
}
-struct clock_source *dce100_clock_source_create(
+static struct clock_source *dce100_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -742,7 +744,7 @@ struct clock_source *dce100_clock_source_create(
return NULL;
}
-void dce100_clock_source_destroy(struct clock_source **clk_src)
+static void dce100_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
@@ -831,7 +833,7 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-bool dce100_validate_bandwidth(
+static bool dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -876,7 +878,7 @@ static bool dce100_validate_surface_sets(
return true;
}
-enum dc_status dce100_validate_global(
+static enum dc_status dce100_validate_global(
struct dc *dc,
struct dc_state *context)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
index d564c0eb8b04..84ab48df0c26 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -23,6 +23,8 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init)
+
DCE110 = dce110_timing_generator.o \
dce110_compressor.o dce110_hw_sequencer.o dce110_resource.o \
dce110_opp_regamma_v.o dce110_opp_csc_v.o dce110_timing_generator_v.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 72b580a4eb85..44564a4742b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -412,36 +412,6 @@ void dce110_compressor_destroy(struct compressor **compressor)
*compressor = NULL;
}
-bool dce110_get_required_compressed_surfacesize(struct fbc_input_info fbc_input_info,
- struct fbc_requested_compressed_size size)
-{
- bool result = false;
-
- unsigned int max_x = FBC_MAX_X, max_y = FBC_MAX_Y;
-
- get_max_support_fbc_buffersize(&max_x, &max_y);
-
- if (fbc_input_info.dynamic_fbc_buffer_alloc == 0) {
- /*
- * For DCE11 here use Max HW supported size: HW Support up to 3840x2400 resolution
- * or 18000 chunks.
- */
- size.preferred_size = size.min_size = align_to_chunks_number_per_line(max_x) * max_y * 4; /* (For FBC when LPT not supported). */
- size.preferred_size_alignment = size.min_size_alignment = 0x100; /* For FBC when LPT not supported */
- size.bits.preferred_must_be_framebuffer_pool = 1;
- size.bits.min_must_be_framebuffer_pool = 1;
-
- result = true;
- }
- /*
- * Maybe to add registry key support with optional size here to override above
- * for debugging purposes
- */
-
- return result;
-}
-
-
void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y)
{
*max_x = FBC_MAX_X;
@@ -455,31 +425,6 @@ void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y)
*/
}
-
-unsigned int controller_id_to_index(enum controller_id controller_id)
-{
- unsigned int index = 0;
-
- switch (controller_id) {
- case CONTROLLER_ID_D0:
- index = 0;
- break;
- case CONTROLLER_ID_D1:
- index = 1;
- break;
- case CONTROLLER_ID_D2:
- index = 2;
- break;
- case CONTROLLER_ID_D3:
- index = 3;
- break;
- default:
- break;
- }
- return index;
-}
-
-
static const struct compressor_funcs dce110_compressor_funcs = {
.power_up_fbc = dce110_compressor_power_up_fbc,
.enable_fbc = dce110_compressor_enable_fbc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 4c230f1de9a3..caee1c9f54bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -921,6 +921,37 @@ void dce110_edp_power_control(
}
}
+void dce110_edp_wait_for_T12(
+ struct dc_link *link)
+{
+ struct dc_context *ctx = link->ctx;
+
+ if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
+ != CONNECTOR_ID_EDP) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (!link->panel_cntl)
+ return;
+
+ if (!link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl) &&
+ link->link_trace.time_stamp.edp_poweroff != 0) {
+ unsigned int t12_duration = 500; // Default T12 as per spec
+ unsigned long long current_ts = dm_get_timestamp(ctx);
+ unsigned long long time_since_edp_poweroff_ms =
+ div64_u64(dm_get_elapse_time_in_ns(
+ ctx,
+ current_ts,
+ link->link_trace.time_stamp.edp_poweroff), 1000000);
+
+ t12_duration += link->local_sink->edid_caps.panel_patch.extra_t12_ms; // Add extra T12
+
+ if (time_since_edp_poweroff_ms < t12_duration)
+ msleep(t12_duration - time_since_edp_poweroff_ms);
+ }
+}
+
/*todo: cloned in stream enc, fix*/
/*
* @brief
@@ -1628,7 +1659,7 @@ static struct dc_link *get_edp_link_with_sink(
return link;
}
-/**
+/*
* When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need:
* 1. Power down all DC HW blocks
* 2. Disable VGA engine on all controllers
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index d54172d88f5f..8bbb499067f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -34,6 +34,7 @@
#include "inc/dce_calcs.h"
#include "dce/dce_mem_input.h"
+#include "dce110_mem_input_v.h"
static void set_flip_control(
struct dce_mem_input *mem_input110,
@@ -468,7 +469,7 @@ static void program_pixel_format(
}
}
-bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input)
+static bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input)
{
struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
uint32_t value;
@@ -483,7 +484,7 @@ bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input)
return false;
}
-bool dce_mem_input_v_program_surface_flip_and_addr(
+static bool dce_mem_input_v_program_surface_flip_and_addr(
struct mem_input *mem_input,
const struct dc_plane_address *address,
bool flip_immediate)
@@ -560,7 +561,7 @@ static const unsigned int *get_dvmm_hw_setting(
}
}
-void dce_mem_input_v_program_pte_vm(
+static void dce_mem_input_v_program_pte_vm(
struct mem_input *mem_input,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
@@ -633,7 +634,7 @@ void dce_mem_input_v_program_pte_vm(
dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL_C, value);
}
-void dce_mem_input_v_program_surface_config(
+static void dce_mem_input_v_program_surface_config(
struct mem_input *mem_input,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
@@ -919,7 +920,7 @@ static void program_nbp_watermark_c(
marks);
}
-void dce_mem_input_v_program_display_marks(
+static void dce_mem_input_v_program_display_marks(
struct mem_input *mem_input,
struct dce_watermarks nbp,
struct dce_watermarks stutter,
@@ -942,7 +943,7 @@ void dce_mem_input_v_program_display_marks(
}
-void dce_mem_input_program_chroma_display_marks(
+static void dce_mem_input_program_chroma_display_marks(
struct mem_input *mem_input,
struct dce_watermarks nbp,
struct dce_watermarks stutter,
@@ -963,7 +964,7 @@ void dce_mem_input_program_chroma_display_marks(
stutter);
}
-void dce110_allocate_mem_input_v(
+static void dce110_allocate_mem_input_v(
struct mem_input *mi,
uint32_t h_total,/* for current stream */
uint32_t v_total,/* for current stream */
@@ -1005,7 +1006,7 @@ void dce110_allocate_mem_input_v(
}
-void dce110_free_mem_input_v(
+static void dce110_free_mem_input_v(
struct mem_input *mi,
uint32_t total_stream_num)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 3f63822b8e28..d7fcc5cccdce 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -410,7 +410,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
- .fp16 = false
+ .fp16 = true
},
.max_upscale_factor = {
@@ -715,7 +715,7 @@ static struct output_pixel_processor *dce110_opp_create(
return &opp->base;
}
-struct dce_aux *dce110_aux_engine_create(
+static struct dce_aux *dce110_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -753,7 +753,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
-struct dce_i2c_hw *dce110_i2c_hw_create(
+static struct dce_i2c_hw *dce110_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -768,7 +768,7 @@ struct dce_i2c_hw *dce110_i2c_hw_create(
return dce_i2c_hw;
}
-struct clock_source *dce110_clock_source_create(
+static struct clock_source *dce110_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -792,7 +792,7 @@ struct clock_source *dce110_clock_source_create(
return NULL;
}
-void dce110_clock_source_destroy(struct clock_source **clk_src)
+static void dce110_clock_source_destroy(struct clock_source **clk_src)
{
struct dce110_clk_src *dce110_clk_src;
@@ -1034,8 +1034,8 @@ static bool dce110_validate_bandwidth(
return result;
}
-enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
- struct dc_caps *caps)
+static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
+ struct dc_caps *caps)
{
if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) ||
((plane_state->dst_rect.height * 2) < plane_state->src_rect.height))
@@ -1089,7 +1089,7 @@ static bool dce110_validate_surface_sets(
return true;
}
-enum dc_status dce110_validate_global(
+static enum dc_status dce110_validate_global(
struct dc *dc,
struct dc_state *context)
{
@@ -1272,7 +1272,6 @@ static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool)
/* update the public caps to indicate an underlay is available */
ctx->dc->caps.max_slave_planes = 1;
- ctx->dc->caps.max_slave_planes = 1;
return true;
}
@@ -1333,7 +1332,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
1000);
}
-const struct resource_caps *dce110_resource_cap(
+static const struct resource_caps *dce110_resource_cap(
struct hw_asic_id *asic_id)
{
if (ASIC_REV_IS_STONEY(asic_id->hw_internal_rev))
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 1ea7db8eeb98..d88a74559edd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -75,7 +75,7 @@ static void dce110_timing_generator_apply_front_porch_workaround(
}
}
-/**
+/*
*****************************************************************************
* Function: is_in_vertical_blank
*
@@ -116,7 +116,7 @@ void dce110_timing_generator_set_early_control(
dm_write_reg(tg->ctx, address, regval);
}
-/**
+/*
* Enable CRTC
* Enable CRTC - call ASIC Control Object to enable Timing generator.
*/
@@ -175,7 +175,7 @@ void dce110_timing_generator_program_blank_color(
dm_write_reg(tg->ctx, addr, value);
}
-/**
+/*
*****************************************************************************
* Function: disable_stereo
*
@@ -226,7 +226,7 @@ static void disable_stereo(struct timing_generator *tg)
}
#endif
-/**
+/*
* disable_crtc - call ASIC Control Object to disable Timing generator.
*/
bool dce110_timing_generator_disable_crtc(struct timing_generator *tg)
@@ -247,11 +247,10 @@ bool dce110_timing_generator_disable_crtc(struct timing_generator *tg)
return result == BP_RESULT_OK;
}
-/**
-* program_horz_count_by_2
-* Programs DxCRTC_HORZ_COUNT_BY2_EN - 1 for DVI 30bpp mode, 0 otherwise
-*
-*/
+/*
+ * program_horz_count_by_2
+ * Programs DxCRTC_HORZ_COUNT_BY2_EN - 1 for DVI 30bpp mode, 0 otherwise
+ */
static void program_horz_count_by_2(
struct timing_generator *tg,
const struct dc_crtc_timing *timing)
@@ -273,7 +272,7 @@ static void program_horz_count_by_2(
CRTC_REG(mmCRTC_COUNT_CONTROL), regval);
}
-/**
+/*
* program_timing_generator
* Program CRTC Timing Registers - DxCRTC_H_*, DxCRTC_V_*, Pixel repetition.
* Call ASIC Control Object to program Timings.
@@ -352,7 +351,7 @@ bool dce110_timing_generator_program_timing_generator(
return result == BP_RESULT_OK;
}
-/**
+/*
*****************************************************************************
* Function: set_drr
*
@@ -521,7 +520,7 @@ uint32_t dce110_timing_generator_get_vblank_counter(struct timing_generator *tg)
return field;
}
-/**
+/*
*****************************************************************************
* Function: dce110_timing_generator_get_position
*
@@ -557,7 +556,7 @@ void dce110_timing_generator_get_position(struct timing_generator *tg,
CRTC_VERT_COUNT_NOM);
}
-/**
+/*
*****************************************************************************
* Function: get_crtc_scanoutpos
*
@@ -1106,11 +1105,11 @@ void dce110_timing_generator_set_test_pattern(
}
}
-/**
-* dce110_timing_generator_validate_timing
-* The timing generators support a maximum display size of is 8192 x 8192 pixels,
-* including both active display and blanking periods. Check H Total and V Total.
-*/
+/*
+ * dce110_timing_generator_validate_timing
+ * The timing generators support a maximum display size of is 8192 x 8192 pixels,
+ * including both active display and blanking periods. Check H Total and V Total.
+ */
bool dce110_timing_generator_validate_timing(
struct timing_generator *tg,
const struct dc_crtc_timing *timing,
@@ -1167,9 +1166,9 @@ bool dce110_timing_generator_validate_timing(
return true;
}
-/**
-* Wait till we are at the beginning of VBlank.
-*/
+/*
+ * Wait till we are at the beginning of VBlank.
+ */
void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg)
{
/* We want to catch beginning of VBlank here, so if the first try are
@@ -1191,9 +1190,9 @@ void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg)
}
}
-/**
-* Wait till we are in VActive (anywhere in VActive)
-*/
+/*
+ * Wait till we are in VActive (anywhere in VActive)
+ */
void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
{
while (dce110_timing_generator_is_in_vertical_blank(tg)) {
@@ -1204,7 +1203,7 @@ void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
}
}
-/**
+/*
*****************************************************************************
* Function: dce110_timing_generator_setup_global_swap_lock
*
@@ -1215,7 +1214,6 @@ void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
* @param [in] gsl_params: setup data
*****************************************************************************
*/
-
void dce110_timing_generator_setup_global_swap_lock(
struct timing_generator *tg,
const struct dcp_gsl_params *gsl_params)
@@ -1351,10 +1349,7 @@ void dce110_timing_generator_tear_down_global_swap_lock(
/* Restore DCP_GSL_PURPOSE_SURFACE_FLIP */
{
- uint32_t value_crtc_vtotal;
-
- value_crtc_vtotal = dm_read_reg(tg->ctx,
- CRTC_REG(mmCRTC_V_TOTAL));
+ dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_V_TOTAL));
set_reg_field_value(value,
0,
@@ -1385,7 +1380,7 @@ void dce110_timing_generator_tear_down_global_swap_lock(
dm_write_reg(tg->ctx, address, value);
}
-/**
+/*
*****************************************************************************
* Function: is_counter_moving
*
@@ -1767,7 +1762,7 @@ void dce110_timing_generator_disable_reset_trigger(
dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);
}
-/**
+/*
*****************************************************************************
* @brief
* Checks whether CRTC triggered reset occurred
@@ -1794,7 +1789,7 @@ bool dce110_timing_generator_did_triggered_reset_occur(
return (force || vert_sync);
}
-/**
+/*
* dce110_timing_generator_disable_vga
* Turn OFF VGA Mode and Timing - DxVGA_CONTROL
* VGA Mode and VGA Timing is used by VBIOS on CRT Monitors;
@@ -1840,14 +1835,13 @@ void dce110_timing_generator_disable_vga(
dm_write_reg(tg->ctx, addr, value);
}
-/**
-* set_overscan_color_black
-*
-* @param :black_color is one of the color space
-* :this routine will set overscan black color according to the color space.
-* @return none
-*/
-
+/*
+ * set_overscan_color_black
+ *
+ * @param :black_color is one of the color space
+ * :this routine will set overscan black color according to the color space.
+ * @return none
+ */
void dce110_timing_generator_set_overscan_color_black(
struct timing_generator *tg,
const struct tg_color *color)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
index a13a2f58944e..c509384fff54 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -46,17 +46,16 @@
*
**********************************************************************************/
-/**
-* Enable CRTCV
-*/
+/*
+ * Enable CRTCV
+ */
static bool dce110_timing_generator_v_enable_crtc(struct timing_generator *tg)
{
/*
-* Set MASTER_UPDATE_MODE to 0
-* This is needed for DRR, and also suggested to be default value by Syed.
-*/
-
+ * Set MASTER_UPDATE_MODE to 0
+ * This is needed for DRR, and also suggested to be default value by Syed.
+ */
uint32_t value;
value = 0;
@@ -209,9 +208,9 @@ static void dce110_timing_generator_v_wait_for_vblank(struct timing_generator *t
}
}
-/**
-* Wait till we are in VActive (anywhere in VActive)
-*/
+/*
+ * Wait till we are in VActive (anywhere in VActive)
+ */
static void dce110_timing_generator_v_wait_for_vactive(struct timing_generator *tg)
{
while (dce110_timing_generator_v_is_in_vertical_blank(tg)) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
index b1aaab5590cc..29438c6050db 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -217,16 +217,15 @@ static bool setup_scaling_configuration(
return is_scaling_needed;
}
-/**
-* Function:
-* void program_overscan
-*
-* Purpose: Programs overscan border
-* Input: overscan
-*
-* Output:
- void
-*/
+/*
+ * Function:
+ * void program_overscan
+ *
+ * Purpose: Programs overscan border
+ * Input: overscan
+ *
+ * Output: void
+ */
static void program_overscan(
struct dce_transform *xfm_dce,
const struct scaler_data *data)
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
index 8e090446d511..9de6501702d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -23,6 +23,8 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init)
+
DCE112 = dce112_compressor.o dce112_hw_sequencer.o \
dce112_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index f99b1c084590..ee55cda854bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -59,7 +59,9 @@
#include "dce/dce_11_2_sh_mask.h"
#include "dce100/dce100_resource.h"
-#define DC_LOGGER \
+#include "dce112_resource.h"
+
+#define DC_LOGGER \
dc->ctx->logger
#ifndef mmDP_DPHY_INTERNAL_CTRL
@@ -617,7 +619,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS4_CAPABLE = true
};
-struct link_encoder *dce112_link_encoder_create(
+static struct link_encoder *dce112_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dce110_link_encoder *enc110 =
@@ -671,7 +673,7 @@ static struct input_pixel_processor *dce112_ipp_create(
return &ipp->base;
}
-struct output_pixel_processor *dce112_opp_create(
+static struct output_pixel_processor *dce112_opp_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -686,7 +688,7 @@ struct output_pixel_processor *dce112_opp_create(
return &opp->base;
}
-struct dce_aux *dce112_aux_engine_create(
+static struct dce_aux *dce112_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -724,7 +726,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
-struct dce_i2c_hw *dce112_i2c_hw_create(
+static struct dce_i2c_hw *dce112_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -739,7 +741,7 @@ struct dce_i2c_hw *dce112_i2c_hw_create(
return dce_i2c_hw;
}
-struct clock_source *dce112_clock_source_create(
+static struct clock_source *dce112_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -763,7 +765,7 @@ struct clock_source *dce112_clock_source_create(
return NULL;
}
-void dce112_clock_source_destroy(struct clock_source **clk_src)
+static void dce112_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
@@ -1024,7 +1026,7 @@ enum dc_status dce112_add_stream_to_ctx(
return result;
}
-enum dc_status dce112_validate_global(
+static enum dc_status dce112_validate_global(
struct dc *dc,
struct dc_state *context)
{
@@ -1202,7 +1204,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
}
-const struct resource_caps *dce112_resource_cap(
+static const struct resource_caps *dce112_resource_cap(
struct hw_asic_id *asic_id)
{
if (ASIC_REV_IS_POLARIS11_M(asic_id->hw_internal_rev) ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
index 37db1f8d45ea..a9cc4b73270b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -24,6 +24,8 @@
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init)
+
DCE120 = dce120_resource.o dce120_timing_generator.o \
dce120_hw_sequencer.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 66a13aa39c95..d4afe6c824d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -50,6 +50,7 @@ struct dce120_hw_seq_reg_offsets {
uint32_t crtc;
};
+#if 0
static const struct dce120_hw_seq_reg_offsets reg_offsets[] = {
{
.crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
@@ -79,7 +80,6 @@ static const struct dce120_hw_seq_reg_offsets reg_offsets[] = {
/*******************************************************************************
* Private definitions
******************************************************************************/
-#if 0
static void dce120_init_pte(struct dc_context *ctx, uint8_t controller_id)
{
uint32_t addr;
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index f1e3d2888eac..c65e4d125c8e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -423,7 +423,7 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCE_112(_MASK)
};
-struct output_pixel_processor *dce120_opp_create(
+static struct output_pixel_processor *dce120_opp_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -437,7 +437,7 @@ struct output_pixel_processor *dce120_opp_create(
ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
-struct dce_aux *dce120_aux_engine_create(
+static struct dce_aux *dce120_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -475,7 +475,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
-struct dce_i2c_hw *dce120_i2c_hw_create(
+static struct dce_i2c_hw *dce120_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 915fbb8e8168..b57c466124e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -69,7 +69,7 @@
#define CRTC_REG_SET_3(reg, field1, val1, field2, val2, field3, val3) \
CRTC_REG_SET_N(reg, 3, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3)
-/**
+/*
*****************************************************************************
* Function: is_in_vertical_blank
*
@@ -98,7 +98,7 @@ static bool dce120_timing_generator_is_in_vertical_blank(
/* determine if given timing can be supported by TG */
-bool dce120_timing_generator_validate_timing(
+static bool dce120_timing_generator_validate_timing(
struct timing_generator *tg,
const struct dc_crtc_timing *timing,
enum signal_type signal)
@@ -125,7 +125,7 @@ bool dce120_timing_generator_validate_timing(
return true;
}
-bool dce120_tg_validate_timing(struct timing_generator *tg,
+static bool dce120_tg_validate_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing)
{
return dce120_timing_generator_validate_timing(tg, timing, SIGNAL_TYPE_NONE);
@@ -133,7 +133,7 @@ bool dce120_tg_validate_timing(struct timing_generator *tg,
/******** HW programming ************/
/* Disable/Enable Timing Generator */
-bool dce120_timing_generator_enable_crtc(struct timing_generator *tg)
+static bool dce120_timing_generator_enable_crtc(struct timing_generator *tg)
{
enum bp_result result;
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -153,7 +153,7 @@ bool dce120_timing_generator_enable_crtc(struct timing_generator *tg)
return result == BP_RESULT_OK;
}
-void dce120_timing_generator_set_early_control(
+static void dce120_timing_generator_set_early_control(
struct timing_generator *tg,
uint32_t early_cntl)
{
@@ -166,7 +166,7 @@ void dce120_timing_generator_set_early_control(
/**************** TG current status ******************/
/* return the current frame counter. Used by Linux kernel DRM */
-uint32_t dce120_timing_generator_get_vblank_counter(
+static uint32_t dce120_timing_generator_get_vblank_counter(
struct timing_generator *tg)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -181,7 +181,7 @@ uint32_t dce120_timing_generator_get_vblank_counter(
}
/* Get current H and V position */
-void dce120_timing_generator_get_crtc_position(
+static void dce120_timing_generator_get_crtc_position(
struct timing_generator *tg,
struct crtc_position *position)
{
@@ -207,7 +207,7 @@ void dce120_timing_generator_get_crtc_position(
}
/* wait until TG is in beginning of vertical blank region */
-void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg)
+static void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg)
{
/* We want to catch beginning of VBlank here, so if the first try are
* in VBlank, we might be very close to Active, in this case wait for
@@ -229,7 +229,7 @@ void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg)
}
/* wait until TG is in beginning of active region */
-void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg)
+static void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg)
{
while (dce120_timing_generator_is_in_vertical_blank(tg)) {
if (!tg->funcs->is_counter_moving(tg)) {
@@ -242,7 +242,7 @@ void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg)
/*********** Timing Generator Synchronization routines ****/
/* Setups Global Swap Lock group, TimingServer or TimingClient*/
-void dce120_timing_generator_setup_global_swap_lock(
+static void dce120_timing_generator_setup_global_swap_lock(
struct timing_generator *tg,
const struct dcp_gsl_params *gsl_params)
{
@@ -279,7 +279,7 @@ void dce120_timing_generator_setup_global_swap_lock(
}
/* Clear all the register writes done by setup_global_swap_lock */
-void dce120_timing_generator_tear_down_global_swap_lock(
+static void dce120_timing_generator_tear_down_global_swap_lock(
struct timing_generator *tg)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -300,7 +300,7 @@ void dce120_timing_generator_tear_down_global_swap_lock(
}
/* Reset slave controllers on master VSync */
-void dce120_timing_generator_enable_reset_trigger(
+static void dce120_timing_generator_enable_reset_trigger(
struct timing_generator *tg,
int source)
{
@@ -347,7 +347,7 @@ void dce120_timing_generator_enable_reset_trigger(
}
/* disabling trigger-reset */
-void dce120_timing_generator_disable_reset_trigger(
+static void dce120_timing_generator_disable_reset_trigger(
struct timing_generator *tg)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -367,7 +367,7 @@ void dce120_timing_generator_disable_reset_trigger(
}
/* Checks whether CRTC triggered reset occurred */
-bool dce120_timing_generator_did_triggered_reset_occur(
+static bool dce120_timing_generator_did_triggered_reset_occur(
struct timing_generator *tg)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -384,7 +384,7 @@ bool dce120_timing_generator_did_triggered_reset_occur(
/******** Stuff to move to other virtual HW objects *****************/
/* Move to enable accelerated mode */
-void dce120_timing_generator_disable_vga(struct timing_generator *tg)
+static void dce120_timing_generator_disable_vga(struct timing_generator *tg)
{
uint32_t offset = 0;
uint32_t value = 0;
@@ -425,7 +425,7 @@ void dce120_timing_generator_disable_vga(struct timing_generator *tg)
}
/* TODO: Should we move it to transform */
/* Fully program CRTC timing in timing generator */
-void dce120_timing_generator_program_blanking(
+static void dce120_timing_generator_program_blanking(
struct timing_generator *tg,
const struct dc_crtc_timing *timing)
{
@@ -485,7 +485,7 @@ void dce120_timing_generator_program_blanking(
/* TODO: Should we move it to opp? */
/* Combine with below and move YUV/RGB color conversion to SW layer */
-void dce120_timing_generator_program_blank_color(
+static void dce120_timing_generator_program_blank_color(
struct timing_generator *tg,
const struct tg_color *black_color)
{
@@ -498,7 +498,7 @@ void dce120_timing_generator_program_blank_color(
CRTC_BLACK_COLOR_R_CR, black_color->color_r_cr);
}
/* Combine with above and move YUV/RGB color conversion to SW layer */
-void dce120_timing_generator_set_overscan_color_black(
+static void dce120_timing_generator_set_overscan_color_black(
struct timing_generator *tg,
const struct tg_color *color)
{
@@ -540,7 +540,7 @@ void dce120_timing_generator_set_overscan_color_black(
*/
}
-void dce120_timing_generator_set_drr(
+static void dce120_timing_generator_set_drr(
struct timing_generator *tg,
const struct drr_params *params)
{
@@ -589,50 +589,7 @@ void dce120_timing_generator_set_drr(
}
}
-/**
- *****************************************************************************
- * Function: dce120_timing_generator_get_position
- *
- * @brief
- * Returns CRTC vertical/horizontal counters
- *
- * @param [out] position
- *****************************************************************************
- */
-void dce120_timing_generator_get_position(struct timing_generator *tg,
- struct crtc_position *position)
-{
- uint32_t value;
- struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
-
- value = dm_read_reg_soc15(
- tg->ctx,
- mmCRTC0_CRTC_STATUS_POSITION,
- tg110->offsets.crtc);
-
- position->horizontal_count = get_reg_field_value(
- value,
- CRTC0_CRTC_STATUS_POSITION,
- CRTC_HORZ_COUNT);
-
- position->vertical_count = get_reg_field_value(
- value,
- CRTC0_CRTC_STATUS_POSITION,
- CRTC_VERT_COUNT);
-
- value = dm_read_reg_soc15(
- tg->ctx,
- mmCRTC0_CRTC_NOM_VERT_POSITION,
- tg110->offsets.crtc);
-
- position->nominal_vcount = get_reg_field_value(
- value,
- CRTC0_CRTC_NOM_VERT_POSITION,
- CRTC_VERT_COUNT_NOM);
-}
-
-
-void dce120_timing_generator_get_crtc_scanoutpos(
+static void dce120_timing_generator_get_crtc_scanoutpos(
struct timing_generator *tg,
uint32_t *v_blank_start,
uint32_t *v_blank_end,
@@ -661,7 +618,7 @@ void dce120_timing_generator_get_crtc_scanoutpos(
*v_position = position.vertical_count;
}
-void dce120_timing_generator_enable_advanced_request(
+static void dce120_timing_generator_enable_advanced_request(
struct timing_generator *tg,
bool enable,
const struct dc_crtc_timing *timing)
@@ -699,7 +656,7 @@ void dce120_timing_generator_enable_advanced_request(
value);
}
-void dce120_tg_program_blank_color(struct timing_generator *tg,
+static void dce120_tg_program_blank_color(struct timing_generator *tg,
const struct tg_color *black_color)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -722,7 +679,7 @@ void dce120_tg_program_blank_color(struct timing_generator *tg,
value);
}
-void dce120_tg_set_overscan_color(struct timing_generator *tg,
+static void dce120_tg_set_overscan_color(struct timing_generator *tg,
const struct tg_color *overscan_color)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -749,7 +706,7 @@ static void dce120_tg_program_timing(struct timing_generator *tg,
dce120_timing_generator_program_blanking(tg, timing);
}
-bool dce120_tg_is_blanked(struct timing_generator *tg)
+static bool dce120_tg_is_blanked(struct timing_generator *tg)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
uint32_t value = dm_read_reg_soc15(
@@ -770,7 +727,7 @@ bool dce120_tg_is_blanked(struct timing_generator *tg)
return false;
}
-void dce120_tg_set_blank(struct timing_generator *tg,
+static void dce120_tg_set_blank(struct timing_generator *tg,
bool enable_blanking)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -789,7 +746,7 @@ void dce120_tg_set_blank(struct timing_generator *tg,
bool dce120_tg_validate_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing);
-void dce120_tg_wait_for_state(struct timing_generator *tg,
+static void dce120_tg_wait_for_state(struct timing_generator *tg,
enum crtc_state state)
{
switch (state) {
@@ -806,7 +763,7 @@ void dce120_tg_wait_for_state(struct timing_generator *tg,
}
}
-void dce120_tg_set_colors(struct timing_generator *tg,
+static void dce120_tg_set_colors(struct timing_generator *tg,
const struct tg_color *blank_color,
const struct tg_color *overscan_color)
{
@@ -833,7 +790,7 @@ static void dce120_timing_generator_set_static_screen_control(
CRTC_STATIC_SCREEN_FRAME_COUNT, num_frames);
}
-void dce120_timing_generator_set_test_pattern(
+static void dce120_timing_generator_set_test_pattern(
struct timing_generator *tg,
/* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
* because this is not DP-specific (which is probably somewhere in DP
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index 7036c3bd0f87..dda596fa1cd7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -23,6 +23,8 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+
DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
dce60_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
index e9dd78c484d6..dcfa0a3efa00 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
@@ -60,6 +60,8 @@
#include "dce/dce_i2c.h"
/* TODO remove this include */
+#include "dce60_resource.h"
+
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
@@ -519,7 +521,7 @@ static struct output_pixel_processor *dce60_opp_create(
return &opp->base;
}
-struct dce_aux *dce60_aux_engine_create(
+static struct dce_aux *dce60_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -557,7 +559,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
-struct dce_i2c_hw *dce60_i2c_hw_create(
+static struct dce_i2c_hw *dce60_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -573,7 +575,7 @@ struct dce_i2c_hw *dce60_i2c_hw_create(
return dce_i2c_hw;
}
-struct dce_i2c_sw *dce60_i2c_sw_create(
+static struct dce_i2c_sw *dce60_i2c_sw_create(
struct dc_context *ctx)
{
struct dce_i2c_sw *dce_i2c_sw =
@@ -707,7 +709,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS3_CAPABLE = true
};
-struct link_encoder *dce60_link_encoder_create(
+static struct link_encoder *dce60_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dce110_link_encoder *enc110 =
@@ -746,7 +748,7 @@ static struct panel_cntl *dce60_panel_cntl_create(const struct panel_cntl_init_d
return &panel_cntl->base;
}
-struct clock_source *dce60_clock_source_create(
+static struct clock_source *dce60_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -770,7 +772,7 @@ struct clock_source *dce60_clock_source_create(
return NULL;
}
-void dce60_clock_source_destroy(struct clock_source **clk_src)
+static void dce60_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
@@ -860,7 +862,7 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool)
}
}
-bool dce60_validate_bandwidth(
+static bool dce60_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -905,7 +907,7 @@ static bool dce60_validate_surface_sets(
return true;
}
-enum dc_status dce60_validate_global(
+static enum dc_status dce60_validate_global(
struct dc *dc,
struct dc_state *context)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
index fc1af0ff0ca4..c1a85ee374d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
@@ -189,8 +189,8 @@ static bool dce60_is_tg_enabled(struct timing_generator *tg)
return field == 1;
}
-bool dce60_configure_crc(struct timing_generator *tg,
- const struct crc_params *params)
+static bool dce60_configure_crc(struct timing_generator *tg,
+ const struct crc_params *params)
{
/* Cannot configure crc on a CRTC that is disabled */
if (!dce60_is_tg_enabled(tg))
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index 666fcb2bdbba..0a9d1a350d8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -23,6 +23,8 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init)
+
DCE80 = dce80_timing_generator.o dce80_hw_sequencer.o \
dce80_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 390a0fa37239..612450f99278 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -60,6 +60,8 @@
#include "dce/dce_i2c.h"
/* TODO remove this include */
+#include "dce80_resource.h"
+
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
@@ -402,7 +404,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
- .fp16 = false
+ .fp16 = true
},
.max_upscale_factor = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 733e6e6e43bd..62ad1a11bff9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -31,11 +31,4 @@ DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
-# fix:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# aarch64 does not support soft-float, so use hard-float and handle this in code
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn10/dcn10_resource.o := -mgeneral-regs-only
-endif
-
AMD_DISPLAY_FILES += $(AMD_DAL_DCN10)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 4d3f7d5e1473..904c2d278998 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -577,7 +577,7 @@ void dpp1_power_on_degamma_lut(
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_SET(CM_MEM_PWR_CTRL, 0,
- SHARED_MEM_PWR_DIS, power_on == true ? 0:1);
+ SHARED_MEM_PWR_DIS, power_on ? 0:1);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index cfc130e2d6fd..7a30d1d9b535 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -647,8 +647,13 @@ static void power_on_plane(
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- hws->funcs.dpp_pg_control(hws, plane_id, true);
- hws->funcs.hubp_pg_control(hws, plane_id, true);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, plane_id, true);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, plane_id, true);
+
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
@@ -1082,8 +1087,13 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- hws->funcs.dpp_pg_control(hws, dpp->inst, false);
- hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, dpp->inst, false);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
@@ -2624,7 +2634,7 @@ static void dcn10_update_dchubp_dpp(
hws->funcs.update_plane_addr(dc, pipe_ctx);
if (is_pipe_tree_visible(pipe_ctx))
- hubp->funcs->set_blank(hubp, false);
+ dc->hwss.set_hubp_blank(dc, pipe_ctx, false);
}
void dcn10_blank_pixel_data(
@@ -3135,13 +3145,16 @@ void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
return;
}
-static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
+static struct pipe_ctx *get_pipe_ctx_by_hubp_inst(struct dc_state *context, int mpcc_inst)
{
int i;
- for (i = 0; i < res_pool->pipe_count; i++) {
- if (res_pool->hubps[i]->inst == mpcc_inst)
- return res_pool->hubps[i];
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_res.hubp
+ && context->res_ctx.pipe_ctx[i].plane_res.hubp->inst == mpcc_inst) {
+ return &context->res_ctx.pipe_ctx[i];
+ }
+
}
ASSERT(false);
return NULL;
@@ -3164,11 +3177,23 @@ void dcn10_wait_for_mpcc_disconnect(
for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
- struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+ struct pipe_ctx *restore_bottom_pipe;
+ struct pipe_ctx *restore_top_pipe;
+ struct pipe_ctx *inst_pipe_ctx = get_pipe_ctx_by_hubp_inst(dc->current_state, mpcc_inst);
+ ASSERT(inst_pipe_ctx);
res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
- hubp->funcs->set_blank(hubp, true);
+ /*
+ * Set top and bottom pipes NULL, as we don't want
+ * to blank those pipes when disconnecting from MPCC
+ */
+ restore_bottom_pipe = inst_pipe_ctx->bottom_pipe;
+ restore_top_pipe = inst_pipe_ctx->top_pipe;
+ inst_pipe_ctx->top_pipe = inst_pipe_ctx->bottom_pipe = NULL;
+ dc->hwss.set_hubp_blank(dc, inst_pipe_ctx, true);
+ inst_pipe_ctx->top_pipe = restore_top_pipe;
+ inst_pipe_ctx->bottom_pipe = restore_bottom_pipe;
}
}
@@ -3721,3 +3746,10 @@ void dcn10_get_clock(struct dc *dc,
dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
}
+
+void dcn10_set_hubp_blank(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank_enable)
+{
+ pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, blank_enable);
+}
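The wait_for_mpcc_disconnect change above blanks exactly one HUBP by temporarily detaching its pipe_ctx from the MPC tree before calling the new set_hubp_blank hook, then restoring the pointers. A minimal standalone sketch of that save/restore pattern, using simplified stand-in types (fake_pipe_ctx, blank_single_hubp) rather than the real DC structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_pipe_ctx {
	struct fake_pipe_ctx *top_pipe;
	struct fake_pipe_ctx *bottom_pipe;
	int hubp_inst;
};

/* Stand-in for dc->hwss.set_hubp_blank(); only reports what it would touch. */
static void set_hubp_blank(struct fake_pipe_ctx *pipe, bool blank)
{
	printf("hubp %d -> %s\n", pipe->hubp_inst, blank ? "blank" : "unblank");
}

/* Blank only this pipe's HUBP, hiding its MPC tree neighbours for the call. */
static void blank_single_hubp(struct fake_pipe_ctx *pipe)
{
	struct fake_pipe_ctx *restore_top = pipe->top_pipe;
	struct fake_pipe_ctx *restore_bottom = pipe->bottom_pipe;

	pipe->top_pipe = pipe->bottom_pipe = NULL;
	set_hubp_blank(pipe, true);
	pipe->top_pipe = restore_top;
	pipe->bottom_pipe = restore_bottom;
}

int main(void)
{
	struct fake_pipe_ctx top = { .hubp_inst = 0 };
	struct fake_pipe_ctx bottom = { .hubp_inst = 1, .top_pipe = &top };

	top.bottom_pipe = &bottom;
	blank_single_hubp(&bottom);
	return 0;
}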
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index e5691e499023..89e6dfb63da0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -163,6 +163,8 @@ void dcn10_wait_for_mpcc_disconnect(
void dce110_edp_backlight_control(
struct dc_link *link,
bool enable);
+void dce110_edp_wait_for_T12(
+ struct dc_link *link);
void dce110_edp_power_control(
struct dc_link *link,
bool power_up);
@@ -202,5 +204,8 @@ void dcn10_wait_for_pending_cleared(struct dc *dc,
struct dc_state *context);
void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
void dcn10_verify_allow_pstate_change_high(struct dc *dc);
+void dcn10_set_hubp_blank(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank_enable);
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 254300b06b43..2f1b802e66a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -79,6 +79,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
+ .set_hubp_blank = dcn10_set_hubp_blank,
};
static const struct hwseq_private_funcs dcn10_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 100ce0e28fd5..b096011acb49 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -470,7 +470,7 @@ void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- uint32_t val = 0;
+ uint32_t val = 0xf;
if (opp_id < MAX_OPP && REG(MUX[opp_id]))
REG_GET(MUX[opp_id], MPC_OUT_MUX, &val);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index f033397a84e9..6138f4887de7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -659,6 +659,16 @@ void optc1_unlock(struct timing_generator *optc)
OTG_MASTER_UPDATE_LOCK, 0);
}
+bool optc1_is_locked(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t locked;
+
+ REG_GET(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, &locked);
+
+ return (locked == 1);
+}
+
void optc1_get_position(struct timing_generator *optc,
struct crtc_position *position)
{
@@ -1513,6 +1523,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc1_lock,
+ .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index b12bd9aae52f..b222c67973d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -638,6 +638,7 @@ void optc1_set_blank(struct timing_generator *optc,
bool enable_blanking);
bool optc1_is_blanked(struct timing_generator *optc);
+bool optc1_is_locked(struct timing_generator *optc);
void optc1_program_blank_color(
struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index bdc37831535e..90e912fef2b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -608,8 +608,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_pplib_clock_request = false,
.disable_pplib_wm_range = false,
.pplib_wm_report_mode = WM_REPORT_DEFAULT,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
- .force_single_disp_pipe_split = true,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
+ .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.voltage_align_fclk = true,
.disable_stereo_support = true,
@@ -1534,15 +1534,8 @@ static bool dcn10_resource_construct(
memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
-#if defined(CONFIG_ARM64)
- /* Aarch64 does not support -msoft-float/-mfloat-abi=soft */
- DC_FP_START();
- dcn10_resource_construct_fp(dc);
- DC_FP_END();
-#else
/* Other architectures we build for build this with soft-float */
dcn10_resource_construct_fp(dc);
-#endif
pool->base.pp_smu = dcn10_pp_smu_create(ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index 624cb1341ef1..5fcaf78334ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -17,10 +17,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index cb822df21b7c..0726fb435e2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1062,8 +1062,13 @@ static void dcn20_power_on_plane(
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
- dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
@@ -1570,8 +1575,8 @@ static void dcn20_update_dchubp_dpp(
- if (pipe_ctx->update_flags.bits.enable)
- hubp->funcs->set_blank(hubp, false);
+ if (is_pipe_tree_visible(pipe_ctx))
+ dc->hwss.set_hubp_blank(dc, pipe_ctx, false);
}
@@ -1765,6 +1770,14 @@ void dcn20_post_unlock_program_front_end(
}
}
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->vtp_locked) {
+ dc->hwss.set_hubp_blank(dc, pipe, true);
+ pipe->vtp_locked = false;
+ }
+ }
/* WA to apply WM setting*/
if (hwseq->wa.DEGVIDCN21)
dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
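Note on the new loop above: pipes flagged with vtp_locked are ones where dcn30_set_disp_pattern_generator (later in this patch) wanted to blank the HUBP while the timing generator was still locked; the blank is deferred to this point, after the lock is released, and the flag is cleared once it has been applied.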
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index de9dcbeea150..51a4166e9750 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -94,6 +94,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+ .set_hubp_blank = dcn10_set_hubp_blank,
};
static const struct hwseq_private_funcs dcn20_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index e04ecf0fc0db..5ed18cac57e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2517,8 +2517,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
* if this primary pipe has a bottom pipe in prev. state
* and if the bottom pipe is still available (which it should be),
* pick that pipe as secondary
- * Same logic applies for ODM pipes. Since mpo is not allowed with odm
- * check in else case.
+ * Same logic applies for ODM pipes
*/
if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
@@ -2526,7 +2525,9 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
secondary_pipe->pipe_idx = preferred_pipe_idx;
}
- } else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
+ }
+ if (secondary_pipe == NULL &&
+ dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index 1ee5fc03b7b3..bb8c95141082 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -13,10 +13,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 074e2713257f..0597391b2171 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -99,6 +99,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
#endif
.is_abm_supported = dcn21_is_abm_supported,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+ .set_hubp_blank = dcn10_set_hubp_blank,
};
static const struct hwseq_private_funcs dcn21_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 1c88d2edd381..b000b43a820d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -296,7 +296,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.num_banks = 8,
.num_chans = 4,
.vmm_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 23.84,
+ .dram_clock_change_latency_us = 11.72,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3600,
.xfc_bus_transport_time_us = 4,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index 248c2711aace..c20331eb62e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -41,11 +41,6 @@ CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -maltivec
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mgeneral-regs-only
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
index 9da66e491116..33985401f25c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -133,7 +133,6 @@ static void dpp3_power_on_gamcor_lut(
struct dpp *dpp_base,
bool power_on)
{
- uint32_t power_status;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
@@ -143,12 +142,6 @@ static void dpp3_power_on_gamcor_lut(
} else
REG_SET(CM_MEM_PWR_CTRL, 0,
GAMCOR_MEM_PWR_DIS, power_on == true ? 0:1);
-
- REG_GET(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, &power_status);
- if (power_status != 0)
- BREAK_TO_DEBUGGER();
-
-
}
void dpp3_program_cm_dealpha(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
index 5fa150f34c60..705fbfc37502 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
@@ -62,6 +62,7 @@
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_IN_BLANK, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, MAX_COMPRESSED_FRAGS, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 3deb3fb1724d..e5cc8f8c363f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -814,6 +814,19 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
return true;
}
+bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane)
+{
+ // add meta size?
+ unsigned int surface_size = plane->plane_size.surface_pitch * plane->plane_size.surface_size.height *
+ (plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+ unsigned int mall_size = dc->caps.mall_size_total;
+
+ if (dc->debug.mall_size_override)
+ mall_size = 1024 * 1024 * dc->debug.mall_size_override;
+
+ return (surface_size + dc->caps.cursor_cache_size) < mall_size;
+}
+
void dcn30_hardware_release(struct dc *dc)
{
/* if pstate unsupported, force it supported */
@@ -823,6 +836,53 @@ void dcn30_hardware_release(struct dc *dc)
dc->res_pool->hubbub, true, true);
}
+void dcn30_set_hubp_blank(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank_enable)
+{
+ struct pipe_ctx *mpcc_pipe;
+ struct pipe_ctx *odm_pipe;
+
+ if (blank_enable) {
+ struct plane_resource *plane_res = &pipe_ctx->plane_res;
+ struct stream_resource *stream_res = &pipe_ctx->stream_res;
+
+ /* Wait for enter vblank */
+ stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
+
+ /* Blank HUBP to allow p-state during blank on all timings */
+ pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, true);
+ /* Confirm hubp in blank */
+ ASSERT(plane_res->hubp->funcs->hubp_in_blank(plane_res->hubp));
+ /* Toggle HUBP_DISABLE */
+ plane_res->hubp->funcs->hubp_soft_reset(plane_res->hubp, true);
+ plane_res->hubp->funcs->hubp_soft_reset(plane_res->hubp, false);
+ for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) {
+ mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, true);
+ /* Confirm hubp in blank */
+ ASSERT(mpcc_pipe->plane_res.hubp->funcs->hubp_in_blank(mpcc_pipe->plane_res.hubp));
+ /* Toggle HUBP_DISABLE */
+ mpcc_pipe->plane_res.hubp->funcs->hubp_soft_reset(mpcc_pipe->plane_res.hubp, true);
+ mpcc_pipe->plane_res.hubp->funcs->hubp_soft_reset(mpcc_pipe->plane_res.hubp, false);
+
+ }
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ odm_pipe->plane_res.hubp->funcs->set_blank(odm_pipe->plane_res.hubp, true);
+ /* Confirm hubp in blank */
+ ASSERT(odm_pipe->plane_res.hubp->funcs->hubp_in_blank(odm_pipe->plane_res.hubp));
+ /* Toggle HUBP_DISABLE */
+ odm_pipe->plane_res.hubp->funcs->hubp_soft_reset(odm_pipe->plane_res.hubp, true);
+ odm_pipe->plane_res.hubp->funcs->hubp_soft_reset(odm_pipe->plane_res.hubp, false);
+ }
+ } else {
+ pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, false);
+ for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
+ mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false);
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_pipe->plane_res.hubp->funcs->set_blank(odm_pipe->plane_res.hubp, false);
+ }
+}
+
void dcn30_set_disp_pattern_generator(const struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum controller_dp_test_pattern test_pattern,
@@ -831,6 +891,25 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
const struct tg_color *solid_color,
int width, int height, int offset)
{
- pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern,
- color_space, color_depth, solid_color, width, height, offset);
+ struct stream_resource *stream_res = &pipe_ctx->stream_res;
+
+ if (test_pattern != CONTROLLER_DP_TEST_PATTERN_VIDEOMODE) {
+ pipe_ctx->vtp_locked = false;
+ /* turning on DPG */
+ stream_res->opp->funcs->opp_set_disp_pattern_generator(stream_res->opp, test_pattern, color_space,
+ color_depth, solid_color, width, height, offset);
+
+ /* Defer hubp blank if tg is locked */
+ if (stream_res->tg->funcs->is_tg_enabled(stream_res->tg)) {
+ if (stream_res->tg->funcs->is_locked(stream_res->tg))
+ pipe_ctx->vtp_locked = true;
+ else
+ dc->hwss.set_hubp_blank(dc, pipe_ctx, true);
+ }
+ } else {
+ dc->hwss.set_hubp_blank(dc, pipe_ctx, false);
+ /* turning off DPG */
+ stream_res->opp->funcs->opp_set_disp_pattern_generator(stream_res->opp, test_pattern, color_space,
+ color_depth, solid_color, width, height, offset);
+ }
}
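dcn30_does_plane_fit_in_mall() above sizes the surface as pitch * height * bytes-per-pixel (8 bytes for formats at or above ARGB16161616, else 4) and requires that this plus the cursor cache fit under the MALL total. A self-contained sketch of that check with illustrative numbers; the helper name and the 64 MiB figure are assumptions for the example, not values taken from the driver:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for dcn30_does_plane_fit_in_mall(): surface bytes
 * plus cursor cache must stay below the total MALL size. */
static bool plane_fits_in_mall(unsigned int pitch_px, unsigned int height_px,
			       unsigned int bytes_per_pixel,
			       unsigned int cursor_cache_bytes,
			       unsigned int mall_bytes)
{
	unsigned int surface_bytes = pitch_px * height_px * bytes_per_pixel;

	return (surface_bytes + cursor_cache_bytes) < mall_bytes;
}

int main(void)
{
	/* e.g. a 1920x1080 ARGB8888 surface against a hypothetical 64 MiB MALL */
	bool fits = plane_fits_in_mall(1920, 1080, 4, 256 * 256 * 8,
				       64 * 1024 * 1024);

	printf("fits in MALL: %s\n", fits ? "yes" : "no");
	return 0;
}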
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
index 7d32c43aafe0..1103f6356e90 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
@@ -65,6 +65,8 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx);
void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx);
+bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane);
+
bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable);
void dcn30_hardware_release(struct dc *dc);
@@ -77,4 +79,8 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
const struct tg_color *solid_color,
int width, int height, int offset);
+void dcn30_set_hubp_blank(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank_enable);
+
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 6125fe440ad0..204444fead97 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -71,6 +71,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .edp_wait_for_T12 = dce110_edp_wait_for_T12,
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
@@ -91,11 +92,13 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
+ .does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
.set_pipe = dcn21_set_pipe,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+ .set_hubp_blank = dcn30_set_hubp_blank,
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 3ba3991ee612..8980c90b2277 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -309,6 +309,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
+ .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 5e126fdf6ec1..e5bb15d8487b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -2631,6 +2631,10 @@ static bool dcn30_resource_construct(
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
+ dc->caps.mall_size_per_mem_channel = 8;
+ /* total size = mall per channel * num channels * 1024 * 1024 */
+ dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
+ dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 2fd5d34e4ba6..3ca7d911d25c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -21,10 +21,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index bdad72140cbc..b8bf6d61005b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -98,6 +98,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+ .set_hubp_blank = dcn30_set_hubp_blank,
};
static const struct hwseq_private_funcs dcn301_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 4825c5c1c6ed..35f5bf08ae96 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1731,6 +1731,7 @@ static struct resource_funcs dcn301_res_pool_funcs = {
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn30_set_mcif_arb_params,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
index 36e44e1b07fa..8d4924b7dc22 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
@@ -20,10 +20,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 808c4dcdb3ac..22ba0be88faf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -53,6 +53,7 @@
#include "dce/dce_i2c_hw.h"
#include "dce/dce_panel_cntl.h"
#include "dce/dmub_abm.h"
+#include "dce/dmub_psr.h"
#include "hw_sequencer_private.h"
#include "reg_helper.h"
@@ -238,6 +239,7 @@ static const struct dc_debug_options debug_defaults_diags = {
.dwb_fi_phase = -1, // -1 = disable
.dmub_command_table = true,
.enable_tri_buf = true,
+ .disable_psr = true,
};
enum dcn302_clk_src_array_id {
@@ -1213,6 +1215,9 @@ static void dcn302_resource_destruct(struct resource_pool *pool)
dce_abm_destroy(&pool->multiple_abms[i]);
}
+ if (pool->psr != NULL)
+ dmub_psr_destroy(&pool->psr);
+
if (pool->dccg != NULL)
dcn_dccg_destroy(&pool->dccg);
}
@@ -1354,8 +1359,6 @@ static bool dcn302_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS)
- dc->debug = debug_defaults_diags;
else
dc->debug = debug_defaults_diags;
@@ -1469,6 +1472,14 @@ static bool dcn302_resource_construct(
}
pool->timing_generator_count = i;
+ /* PSR */
+ pool->psr = dmub_psr_create(ctx);
+ if (pool->psr == NULL) {
+ dm_error("DC: failed to create psr!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
/* ABMs */
for (i = 0; i < pool->res_cap->num_timing_generator; i++) {
pool->multiple_abms[i] = dmub_abm_create(ctx, &abm_regs[i], &abm_shift, &abm_mask);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index a02a33dcd70b..6bb7f2905821 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
dml_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-dml_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 45f028986a8d..f33e3de08176 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -3138,7 +3138,7 @@ static void CalculateFlipSchedule(
4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime + 0.125),
1) / 4.0;
- if ((GPUVMEnable == true || DCCEnable == true)) {
+ if ((GPUVMEnable || DCCEnable)) {
mode_lib->vba.ImmediateFlipBW[0] = BandwidthAvailableForImmediateFlip
* ImmediateFlipBytes / TotImmediateFlipBytes;
TimeForFetchingRowInVBlankImmediateFlip = dml_max(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 860e72a51534..80170f9721ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2635,14 +2635,15 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
}
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
- mode_lib->vba.MinActiveDRAMClockChangeMargin > 60 &&
- mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+ mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
mode_lib->vba.DRAMClockChangeWatermark += 25;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.DRAMClockChangeWatermark >
- dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
- mode_lib->vba.MinTTUVBlank[k] += 25;
+ if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+ if (mode_lib->vba.DRAMClockChangeWatermark >
+ dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+ mode_lib->vba.MinTTUVBlank[k] += 25;
+ }
}
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 86ff24dffc3e..0bcec113ecac 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -5121,48 +5121,48 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (j = 0; j < 2; j++) {
enum dm_validation_status status = DML_VALIDATION_OK;
- if (mode_lib->vba.ScaleRatioAndTapsSupport != true) {
+ if (!mode_lib->vba.ScaleRatioAndTapsSupport) {
status = DML_FAIL_SCALE_RATIO_TAP;
- } else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
+ } else if (!mode_lib->vba.SourceFormatPixelAndScanSupport) {
status = DML_FAIL_SOURCE_PIXEL_FORMAT;
- } else if (locals->ViewportSizeSupport[i][0] != true) {
+ } else if (!locals->ViewportSizeSupport[i][0]) {
status = DML_FAIL_VIEWPORT_SIZE;
- } else if (locals->DIOSupport[i] != true) {
+ } else if (!locals->DIOSupport[i]) {
status = DML_FAIL_DIO_SUPPORT;
- } else if (locals->NotEnoughDSCUnits[i] != false) {
+ } else if (locals->NotEnoughDSCUnits[i]) {
status = DML_FAIL_NOT_ENOUGH_DSC;
- } else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) {
+ } else if (locals->DSCCLKRequiredMoreThanSupported[i]) {
status = DML_FAIL_DSC_CLK_REQUIRED;
- } else if (locals->ROBSupport[i][0] != true) {
+ } else if (!locals->ROBSupport[i][0]) {
status = DML_FAIL_REORDERING_BUFFER;
- } else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
+ } else if (!locals->DISPCLK_DPPCLK_Support[i][j]) {
status = DML_FAIL_DISPCLK_DPPCLK;
- } else if (locals->TotalAvailablePipesSupport[i][j] != true) {
+ } else if (!locals->TotalAvailablePipesSupport[i][j]) {
status = DML_FAIL_TOTAL_AVAILABLE_PIPES;
- } else if (mode_lib->vba.NumberOfOTGSupport != true) {
+ } else if (!mode_lib->vba.NumberOfOTGSupport) {
status = DML_FAIL_NUM_OTG;
- } else if (mode_lib->vba.WritebackModeSupport != true) {
+ } else if (!mode_lib->vba.WritebackModeSupport) {
status = DML_FAIL_WRITEBACK_MODE;
- } else if (mode_lib->vba.WritebackLatencySupport != true) {
+ } else if (!mode_lib->vba.WritebackLatencySupport) {
status = DML_FAIL_WRITEBACK_LATENCY;
- } else if (mode_lib->vba.WritebackScaleRatioAndTapsSupport != true) {
+ } else if (!mode_lib->vba.WritebackScaleRatioAndTapsSupport) {
status = DML_FAIL_WRITEBACK_SCALE_RATIO_TAP;
- } else if (mode_lib->vba.CursorSupport != true) {
+ } else if (!mode_lib->vba.CursorSupport) {
status = DML_FAIL_CURSOR_SUPPORT;
- } else if (mode_lib->vba.PitchSupport != true) {
+ } else if (!mode_lib->vba.PitchSupport) {
status = DML_FAIL_PITCH_SUPPORT;
- } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
+ } else if (!locals->TotalVerticalActiveBandwidthSupport[i][0]) {
status = DML_FAIL_TOTAL_V_ACTIVE_BW;
- } else if (locals->PTEBufferSizeNotExceeded[i][j] != true) {
+ } else if (!locals->PTEBufferSizeNotExceeded[i][j]) {
status = DML_FAIL_PTE_BUFFER_SIZE;
- } else if (mode_lib->vba.NonsupportedDSCInputBPC != false) {
+ } else if (mode_lib->vba.NonsupportedDSCInputBPC) {
status = DML_FAIL_DSC_INPUT_BPC;
- } else if ((mode_lib->vba.HostVMEnable != false
- && locals->ImmediateFlipSupportedForState[i][j] != true)) {
+ } else if ((mode_lib->vba.HostVMEnable
+ && !locals->ImmediateFlipSupportedForState[i][j])) {
status = DML_FAIL_HOST_VM_IMMEDIATE_FLIP;
- } else if (locals->PrefetchSupported[i][j] != true) {
+ } else if (!locals->PrefetchSupported[i][j]) {
status = DML_FAIL_PREFETCH_SUPPORT;
- } else if (locals->VRatioInPrefetchSupported[i][j] != true) {
+ } else if (!locals->VRatioInPrefetchSupported[i][j]) {
status = DML_FAIL_V_RATIO_PREFETCH;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 319dec59bcd1..8fdb34aa70ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -1219,13 +1219,13 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: prefetch_bw_equ: %f\n", prefetch_bw_equ);
if (prefetch_bw_equ > 0) {
- if (GPUVMEnable == true) {
+ if (GPUVMEnable) {
Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
} else {
Tvm_equ = LineTime / 4;
}
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ if ((GPUVMEnable || myPipe->DCCEnable)) {
Tr0_equ = dml_max4(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_equ,
Tr0_trips,
@@ -5558,7 +5558,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
+ if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
} else if (((mode_lib->vba.SynchronizedVBlank == true || mode_lib->vba.TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index f2624a1156e5..8d31eb75c6a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -10,10 +10,6 @@ ifdef CONFIG_PPC64
dsc_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-dsc_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
index df68430aeb0c..c6e28f6bf1a2 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
@@ -28,6 +28,7 @@
*/
#include "dm_services.h"
+#include "hw_factory_diag.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
index 8a74f6adb8ee..bf68eb1d9a1d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
@@ -26,6 +26,8 @@
#ifndef __DAL_HW_FACTORY_DIAG_FPGA_H__
#define __DAL_HW_FACTORY_DIAG_FPGA_H__
+struct hw_factory;
+
/* Initialize HW factory function pointers and pin info */
void dal_hw_factory_diag_fpga_init(struct hw_factory *factory);
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
index bf9068846927..e5138a5a8eb5 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
@@ -24,6 +24,7 @@
*/
#include "dm_services.h"
+#include "hw_translate_diag.h"
#include "include/gpio_types.h"
#include "../hw_translate.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
index 1ae153eab31d..7a8cec2d7a90 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
@@ -107,13 +107,12 @@ static enum gpio_result set_config(
msleep(3);
}
} else {
- uint32_t reg2;
uint32_t sda_pd_dis = 0;
uint32_t scl_pd_dis = 0;
- reg2 = REG_GET_2(gpio.MASK_reg,
- DC_GPIO_SDA_PD_DIS, &sda_pd_dis,
- DC_GPIO_SCL_PD_DIS, &scl_pd_dis);
+ REG_GET_2(gpio.MASK_reg,
+ DC_GPIO_SDA_PD_DIS, &sda_pd_dis,
+ DC_GPIO_SCL_PD_DIS, &scl_pd_dis);
if (sda_pd_dis) {
REG_SET(gpio.MASK_reg, regval,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index da73bfb3cacd..92c65d2fa7d7 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -119,17 +119,3 @@ bool dal_hw_factory_init(
return false;
}
}
-
-void dal_hw_factory_destroy(
- struct dc_context *ctx,
- struct hw_factory **factory)
-{
- if (!factory || !*factory) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- kfree(*factory);
-
- *factory = NULL;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 2d77eac66cb0..8efa1b80546d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -333,6 +333,7 @@ struct pipe_ctx {
union pipe_update_flags update_flags;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
+ bool vtp_locked;
};
struct resource_context {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index f7632fe25976..754832d216fd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -190,6 +190,7 @@ struct timing_generator_funcs {
void (*set_blank)(struct timing_generator *tg,
bool enable_blanking);
bool (*is_blanked)(struct timing_generator *tg);
+ bool (*is_locked)(struct timing_generator *tg);
void (*set_overscan_blank_color) (struct timing_generator *tg, const struct tg_color *color);
void (*set_blank_color)(struct timing_generator *tg, const struct tg_color *color);
void (*set_colors)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 62804dc7b698..48378beb71c0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -54,6 +54,7 @@ struct hw_sequencer_funcs {
/* Embedded Display Related */
void (*edp_power_control)(struct dc_link *link, bool enable);
void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
+ void (*edp_wait_for_T12)(struct dc_link *link);
/* Pipe Programming Related */
void (*init_hw)(struct dc *dc);
@@ -217,6 +218,8 @@ struct hw_sequencer_funcs {
/* Idle Optimization Related */
bool (*apply_idle_power_optimizations)(struct dc *dc, bool enable);
+ bool (*does_plane_fit_in_mall)(struct dc *dc, struct dc_plane_state *plane);
+
bool (*is_abm_supported)(struct dc *dc,
struct dc_state *context, struct dc_stream_state *stream);
@@ -227,6 +230,10 @@ struct hw_sequencer_funcs {
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width, int height, int offset);
+
+ void (*set_hubp_blank)(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank_enable);
};
void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index 6bf27bde8724..5f245bde54ff 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service)
*irq_service = NULL;
}
-const struct irq_source_info *find_irq_source_info(
+static const struct irq_source_info *find_irq_source_info(
struct irq_service *irq_service,
enum dc_irq_source source)
{
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 95cb56929e79..126c2f3a4dd3 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -55,10 +55,6 @@
#include <asm/fpu/api.h>
#define DC_FP_START() kernel_fpu_begin()
#define DC_FP_END() kernel_fpu_end()
-#elif defined(CONFIG_ARM64)
-#include <asm/neon.h>
-#define DC_FP_START() kernel_neon_begin()
-#define DC_FP_END() kernel_neon_end()
#elif defined(CONFIG_PPC64)
#include <asm/switch_to.h>
#include <asm/cputable.h>
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
index f00df02ded81..7e6f4dbabe45 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
@@ -26,6 +26,7 @@
#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn20.h"
+#include "dmub_dcn30.h"
#include "sienna_cichlid_ip_offset.h"
#include "dcn/dcn_3_0_0_offset.h"
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 0fdf7a3e96de..57f198de5e2c 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -409,16 +409,11 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
/**
- *****************************************************************************
- * Function: mod_build_hf_vsif_infopacket
+ * mod_build_hf_vsif_infopacket - Prepare HDMI Vendor Specific info frame.
+ * Follows HDMI Spec to build up Vendor Specific info frame
*
- * @brief
- * Prepare HDMI Vendor Specific info frame.
- * Follows HDMI Spec to build up Vendor Specific info frame
- *
- * @param [in] stream: contains data we may need to construct VSIF (i.e. timing_3d_format, etc.)
- * @param [out] info_packet: output structure where to store VSIF
- *****************************************************************************
+ * @stream: contains data we may need to construct VSIF (i.e. timing_3d_format, etc.)
+ * @info_packet: output structure where to store VSIF
*/
void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet)
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 4fd8bce95d84..3d4c66933f51 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -278,7 +278,7 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
}
}
-void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters params)
+static void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters params)
{
unsigned int set = params.set;
@@ -452,7 +452,7 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters
params, ram_table);
}
-void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
+static void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
{
unsigned int set = params.set;
@@ -598,7 +598,7 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
params, ram_table, true);
}
-void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params, bool big_endian)
+static void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params, bool big_endian)
{
unsigned int i, j;
unsigned int set = params.set;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h
new file mode 100644
index 000000000000..bd129266ebfd
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h
@@ -0,0 +1,345 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_4_2_0_OFFSET_HEADER
+#define _osssys_4_2_0_OFFSET_HEADER
+
+
+
+// addressBlock: osssys_osssysdec
+// base address: 0x4280
+#define mmIH_VMID_0_LUT 0x0000
+#define mmIH_VMID_0_LUT_BASE_IDX 0
+#define mmIH_VMID_1_LUT 0x0001
+#define mmIH_VMID_1_LUT_BASE_IDX 0
+#define mmIH_VMID_2_LUT 0x0002
+#define mmIH_VMID_2_LUT_BASE_IDX 0
+#define mmIH_VMID_3_LUT 0x0003
+#define mmIH_VMID_3_LUT_BASE_IDX 0
+#define mmIH_VMID_4_LUT 0x0004
+#define mmIH_VMID_4_LUT_BASE_IDX 0
+#define mmIH_VMID_5_LUT 0x0005
+#define mmIH_VMID_5_LUT_BASE_IDX 0
+#define mmIH_VMID_6_LUT 0x0006
+#define mmIH_VMID_6_LUT_BASE_IDX 0
+#define mmIH_VMID_7_LUT 0x0007
+#define mmIH_VMID_7_LUT_BASE_IDX 0
+#define mmIH_VMID_8_LUT 0x0008
+#define mmIH_VMID_8_LUT_BASE_IDX 0
+#define mmIH_VMID_9_LUT 0x0009
+#define mmIH_VMID_9_LUT_BASE_IDX 0
+#define mmIH_VMID_10_LUT 0x000a
+#define mmIH_VMID_10_LUT_BASE_IDX 0
+#define mmIH_VMID_11_LUT 0x000b
+#define mmIH_VMID_11_LUT_BASE_IDX 0
+#define mmIH_VMID_12_LUT 0x000c
+#define mmIH_VMID_12_LUT_BASE_IDX 0
+#define mmIH_VMID_13_LUT 0x000d
+#define mmIH_VMID_13_LUT_BASE_IDX 0
+#define mmIH_VMID_14_LUT 0x000e
+#define mmIH_VMID_14_LUT_BASE_IDX 0
+#define mmIH_VMID_15_LUT 0x000f
+#define mmIH_VMID_15_LUT_BASE_IDX 0
+#define mmIH_VMID_0_LUT_MM 0x0010
+#define mmIH_VMID_0_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_1_LUT_MM 0x0011
+#define mmIH_VMID_1_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_2_LUT_MM 0x0012
+#define mmIH_VMID_2_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_3_LUT_MM 0x0013
+#define mmIH_VMID_3_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_4_LUT_MM 0x0014
+#define mmIH_VMID_4_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_5_LUT_MM 0x0015
+#define mmIH_VMID_5_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_6_LUT_MM 0x0016
+#define mmIH_VMID_6_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_7_LUT_MM 0x0017
+#define mmIH_VMID_7_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_8_LUT_MM 0x0018
+#define mmIH_VMID_8_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_9_LUT_MM 0x0019
+#define mmIH_VMID_9_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_10_LUT_MM 0x001a
+#define mmIH_VMID_10_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_11_LUT_MM 0x001b
+#define mmIH_VMID_11_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_12_LUT_MM 0x001c
+#define mmIH_VMID_12_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_13_LUT_MM 0x001d
+#define mmIH_VMID_13_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_14_LUT_MM 0x001e
+#define mmIH_VMID_14_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_15_LUT_MM 0x001f
+#define mmIH_VMID_15_LUT_MM_BASE_IDX 0
+#define mmIH_COOKIE_0 0x0020
+#define mmIH_COOKIE_0_BASE_IDX 0
+#define mmIH_COOKIE_1 0x0021
+#define mmIH_COOKIE_1_BASE_IDX 0
+#define mmIH_COOKIE_2 0x0022
+#define mmIH_COOKIE_2_BASE_IDX 0
+#define mmIH_COOKIE_3 0x0023
+#define mmIH_COOKIE_3_BASE_IDX 0
+#define mmIH_COOKIE_4 0x0024
+#define mmIH_COOKIE_4_BASE_IDX 0
+#define mmIH_COOKIE_5 0x0025
+#define mmIH_COOKIE_5_BASE_IDX 0
+#define mmIH_COOKIE_6 0x0026
+#define mmIH_COOKIE_6_BASE_IDX 0
+#define mmIH_COOKIE_7 0x0027
+#define mmIH_COOKIE_7_BASE_IDX 0
+#define mmIH_REGISTER_LAST_PART0 0x003f
+#define mmIH_REGISTER_LAST_PART0_BASE_IDX 0
+#define mmSEM_REQ_INPUT_0 0x0040
+#define mmSEM_REQ_INPUT_0_BASE_IDX 0
+#define mmSEM_REQ_INPUT_1 0x0041
+#define mmSEM_REQ_INPUT_1_BASE_IDX 0
+#define mmSEM_REQ_INPUT_2 0x0042
+#define mmSEM_REQ_INPUT_2_BASE_IDX 0
+#define mmSEM_REQ_INPUT_3 0x0043
+#define mmSEM_REQ_INPUT_3_BASE_IDX 0
+#define mmSEM_REGISTER_LAST_PART0 0x007f
+#define mmSEM_REGISTER_LAST_PART0_BASE_IDX 0
+#define mmIH_RB_CNTL 0x0080
+#define mmIH_RB_CNTL_BASE_IDX 0
+#define mmIH_RB_BASE 0x0081
+#define mmIH_RB_BASE_BASE_IDX 0
+#define mmIH_RB_BASE_HI 0x0082
+#define mmIH_RB_BASE_HI_BASE_IDX 0
+#define mmIH_RB_RPTR 0x0083
+#define mmIH_RB_RPTR_BASE_IDX 0
+#define mmIH_RB_WPTR 0x0084
+#define mmIH_RB_WPTR_BASE_IDX 0
+#define mmIH_RB_WPTR_ADDR_HI 0x0085
+#define mmIH_RB_WPTR_ADDR_HI_BASE_IDX 0
+#define mmIH_RB_WPTR_ADDR_LO 0x0086
+#define mmIH_RB_WPTR_ADDR_LO_BASE_IDX 0
+#define mmIH_DOORBELL_RPTR 0x0087
+#define mmIH_DOORBELL_RPTR_BASE_IDX 0
+#define mmIH_RB_CNTL_RING1 0x008c
+#define mmIH_RB_CNTL_RING1_BASE_IDX 0
+#define mmIH_RB_BASE_RING1 0x008d
+#define mmIH_RB_BASE_RING1_BASE_IDX 0
+#define mmIH_RB_BASE_HI_RING1 0x008e
+#define mmIH_RB_BASE_HI_RING1_BASE_IDX 0
+#define mmIH_RB_RPTR_RING1 0x008f
+#define mmIH_RB_RPTR_RING1_BASE_IDX 0
+#define mmIH_RB_WPTR_RING1 0x0090
+#define mmIH_RB_WPTR_RING1_BASE_IDX 0
+#define mmIH_DOORBELL_RPTR_RING1 0x0093
+#define mmIH_DOORBELL_RPTR_RING1_BASE_IDX 0
+#define mmIH_RB_CNTL_RING2 0x0098
+#define mmIH_RB_CNTL_RING2_BASE_IDX 0
+#define mmIH_RB_BASE_RING2 0x0099
+#define mmIH_RB_BASE_RING2_BASE_IDX 0
+#define mmIH_RB_BASE_HI_RING2 0x009a
+#define mmIH_RB_BASE_HI_RING2_BASE_IDX 0
+#define mmIH_RB_RPTR_RING2 0x009b
+#define mmIH_RB_RPTR_RING2_BASE_IDX 0
+#define mmIH_RB_WPTR_RING2 0x009c
+#define mmIH_RB_WPTR_RING2_BASE_IDX 0
+#define mmIH_DOORBELL_RPTR_RING2 0x009f
+#define mmIH_DOORBELL_RPTR_RING2_BASE_IDX 0
+#define mmIH_VERSION 0x00a5
+#define mmIH_VERSION_BASE_IDX 0
+#define mmIH_CNTL 0x00c0
+#define mmIH_CNTL_BASE_IDX 0
+#define mmIH_CNTL2 0x00c1
+#define mmIH_CNTL2_BASE_IDX 0
+#define mmIH_STATUS 0x00c2
+#define mmIH_STATUS_BASE_IDX 0
+#define mmIH_PERFMON_CNTL 0x00c3
+#define mmIH_PERFMON_CNTL_BASE_IDX 0
+#define mmIH_PERFCOUNTER0_RESULT 0x00c4
+#define mmIH_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define mmIH_PERFCOUNTER1_RESULT 0x00c5
+#define mmIH_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define mmIH_DSM_MATCH_VALUE_BIT_31_0 0x00c7
+#define mmIH_DSM_MATCH_VALUE_BIT_31_0_BASE_IDX 0
+#define mmIH_DSM_MATCH_VALUE_BIT_63_32 0x00c8
+#define mmIH_DSM_MATCH_VALUE_BIT_63_32_BASE_IDX 0
+#define mmIH_DSM_MATCH_VALUE_BIT_95_64 0x00c9
+#define mmIH_DSM_MATCH_VALUE_BIT_95_64_BASE_IDX 0
+#define mmIH_DSM_MATCH_FIELD_CONTROL 0x00ca
+#define mmIH_DSM_MATCH_FIELD_CONTROL_BASE_IDX 0
+#define mmIH_DSM_MATCH_DATA_CONTROL 0x00cb
+#define mmIH_DSM_MATCH_DATA_CONTROL_BASE_IDX 0
+#define mmIH_DSM_MATCH_FCN_ID 0x00cc
+#define mmIH_DSM_MATCH_FCN_ID_BASE_IDX 0
+#define mmIH_LIMIT_INT_RATE_CNTL 0x00cd
+#define mmIH_LIMIT_INT_RATE_CNTL_BASE_IDX 0
+#define mmIH_VF_RB_STATUS 0x00ce
+#define mmIH_VF_RB_STATUS_BASE_IDX 0
+#define mmIH_VF_RB_STATUS2 0x00cf
+#define mmIH_VF_RB_STATUS2_BASE_IDX 0
+#define mmIH_VF_RB1_STATUS 0x00d0
+#define mmIH_VF_RB1_STATUS_BASE_IDX 0
+#define mmIH_VF_RB1_STATUS2 0x00d1
+#define mmIH_VF_RB1_STATUS2_BASE_IDX 0
+#define mmIH_VF_RB2_STATUS 0x00d2
+#define mmIH_VF_RB2_STATUS_BASE_IDX 0
+#define mmIH_VF_RB2_STATUS2 0x00d3
+#define mmIH_VF_RB2_STATUS2_BASE_IDX 0
+#define mmIH_INT_FLOOD_CNTL 0x00d5
+#define mmIH_INT_FLOOD_CNTL_BASE_IDX 0
+#define mmIH_RB0_INT_FLOOD_STATUS 0x00d6
+#define mmIH_RB0_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_RB1_INT_FLOOD_STATUS 0x00d7
+#define mmIH_RB1_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_RB2_INT_FLOOD_STATUS 0x00d8
+#define mmIH_RB2_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_INT_FLOOD_STATUS 0x00d9
+#define mmIH_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_STORM_CLIENT_LIST_CNTL 0x00da
+#define mmIH_STORM_CLIENT_LIST_CNTL_BASE_IDX 0
+#define mmIH_CLK_CTRL 0x00db
+#define mmIH_CLK_CTRL_BASE_IDX 0
+#define mmIH_INT_FLAGS 0x00dc
+#define mmIH_INT_FLAGS_BASE_IDX 0
+#define mmIH_LAST_INT_INFO0 0x00dd
+#define mmIH_LAST_INT_INFO0_BASE_IDX 0
+#define mmIH_LAST_INT_INFO1 0x00de
+#define mmIH_LAST_INT_INFO1_BASE_IDX 0
+#define mmIH_LAST_INT_INFO2 0x00df
+#define mmIH_LAST_INT_INFO2_BASE_IDX 0
+#define mmIH_SCRATCH 0x00e0
+#define mmIH_SCRATCH_BASE_IDX 0
+#define mmIH_CLIENT_CREDIT_ERROR 0x00e1
+#define mmIH_CLIENT_CREDIT_ERROR_BASE_IDX 0
+#define mmIH_GPU_IOV_VIOLATION_LOG 0x00e2
+#define mmIH_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define mmIH_COOKIE_REC_VIOLATION_LOG 0x00e3
+#define mmIH_COOKIE_REC_VIOLATION_LOG_BASE_IDX 0
+#define mmIH_CREDIT_STATUS 0x00e4
+#define mmIH_CREDIT_STATUS_BASE_IDX 0
+#define mmIH_MMHUB_ERROR 0x00e5
+#define mmIH_MMHUB_ERROR_BASE_IDX 0
+#define mmIH_MEM_POWER_CTRL 0x00e8
+#define mmIH_MEM_POWER_CTRL_BASE_IDX 0
+#define mmIH_REGISTER_LAST_PART2 0x00ff
+#define mmIH_REGISTER_LAST_PART2_BASE_IDX 0
+#define mmSEM_CLK_CTRL 0x0100
+#define mmSEM_CLK_CTRL_BASE_IDX 0
+#define mmSEM_UTC_CREDIT 0x0101
+#define mmSEM_UTC_CREDIT_BASE_IDX 0
+#define mmSEM_UTC_CONFIG 0x0102
+#define mmSEM_UTC_CONFIG_BASE_IDX 0
+#define mmSEM_UTCL2_TRAN_EN_LUT 0x0103
+#define mmSEM_UTCL2_TRAN_EN_LUT_BASE_IDX 0
+#define mmSEM_MCIF_CONFIG 0x0104
+#define mmSEM_MCIF_CONFIG_BASE_IDX 0
+#define mmSEM_PERFMON_CNTL 0x0105
+#define mmSEM_PERFMON_CNTL_BASE_IDX 0
+#define mmSEM_PERFCOUNTER0_RESULT 0x0106
+#define mmSEM_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define mmSEM_PERFCOUNTER1_RESULT 0x0107
+#define mmSEM_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define mmSEM_STATUS 0x0108
+#define mmSEM_STATUS_BASE_IDX 0
+#define mmSEM_MAILBOX_CLIENTCONFIG 0x0109
+#define mmSEM_MAILBOX_CLIENTCONFIG_BASE_IDX 0
+#define mmSEM_MAILBOX 0x010a
+#define mmSEM_MAILBOX_BASE_IDX 0
+#define mmSEM_MAILBOX_CONTROL 0x010b
+#define mmSEM_MAILBOX_CONTROL_BASE_IDX 0
+#define mmSEM_CHICKEN_BITS 0x010c
+#define mmSEM_CHICKEN_BITS_BASE_IDX 0
+#define mmSEM_MAILBOX_CLIENTCONFIG_EXTRA 0x010d
+#define mmSEM_MAILBOX_CLIENTCONFIG_EXTRA_BASE_IDX 0
+#define mmSEM_GPU_IOV_VIOLATION_LOG 0x010e
+#define mmSEM_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define mmSEM_OUTSTANDING_THRESHOLD 0x010f
+#define mmSEM_OUTSTANDING_THRESHOLD_BASE_IDX 0
+#define mmSEM_MEM_POWER_CTRL 0x0110
+#define mmSEM_MEM_POWER_CTRL_BASE_IDX 0
+#define mmSEM_REGISTER_LAST_PART2 0x017f
+#define mmSEM_REGISTER_LAST_PART2_BASE_IDX 0
+#define mmIH_ACTIVE_FCN_ID 0x0180
+#define mmIH_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmIH_VIRT_RESET_REQ 0x0181
+#define mmIH_VIRT_RESET_REQ_BASE_IDX 0
+#define mmIH_CLIENT_CFG 0x0184
+#define mmIH_CLIENT_CFG_BASE_IDX 0
+#define mmIH_CLIENT_CFG_INDEX 0x0188
+#define mmIH_CLIENT_CFG_INDEX_BASE_IDX 0
+#define mmIH_CLIENT_CFG_DATA 0x0189
+#define mmIH_CLIENT_CFG_DATA_BASE_IDX 0
+#define mmIH_CID_REMAP_INDEX 0x018a
+#define mmIH_CID_REMAP_INDEX_BASE_IDX 0
+#define mmIH_CID_REMAP_DATA 0x018b
+#define mmIH_CID_REMAP_DATA_BASE_IDX 0
+#define mmIH_CHICKEN 0x018c
+#define mmIH_CHICKEN_BASE_IDX 0
+#define mmIH_MMHUB_CNTL 0x018d
+#define mmIH_MMHUB_CNTL_BASE_IDX 0
+#define mmIH_INT_DROP_CNTL 0x018e
+#define mmIH_INT_DROP_CNTL_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_VALUE0 0x018f
+#define mmIH_INT_DROP_MATCH_VALUE0_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_VALUE1 0x0190
+#define mmIH_INT_DROP_MATCH_VALUE1_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_MASK0 0x0191
+#define mmIH_INT_DROP_MATCH_MASK0_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_MASK1 0x0192
+#define mmIH_INT_DROP_MATCH_MASK1_BASE_IDX 0
+#define mmIH_REGISTER_LAST_PART1 0x019f
+#define mmIH_REGISTER_LAST_PART1_BASE_IDX 0
+#define mmSEM_ACTIVE_FCN_ID 0x01a0
+#define mmSEM_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmSEM_VIRT_RESET_REQ 0x01a1
+#define mmSEM_VIRT_RESET_REQ_BASE_IDX 0
+#define mmSEM_RESP_SDMA0 0x01a4
+#define mmSEM_RESP_SDMA0_BASE_IDX 0
+#define mmSEM_RESP_SDMA1 0x01a5
+#define mmSEM_RESP_SDMA1_BASE_IDX 0
+#define mmSEM_RESP_UVD 0x01a6
+#define mmSEM_RESP_UVD_BASE_IDX 0
+#define mmSEM_RESP_VCE_0 0x01a7
+#define mmSEM_RESP_VCE_0_BASE_IDX 0
+#define mmSEM_RESP_ACP 0x01a8
+#define mmSEM_RESP_ACP_BASE_IDX 0
+#define mmSEM_RESP_ISP 0x01a9
+#define mmSEM_RESP_ISP_BASE_IDX 0
+#define mmSEM_RESP_VCE_1 0x01aa
+#define mmSEM_RESP_VCE_1_BASE_IDX 0
+#define mmSEM_RESP_VP8 0x01ab
+#define mmSEM_RESP_VP8_BASE_IDX 0
+#define mmSEM_RESP_GC 0x01ac
+#define mmSEM_RESP_GC_BASE_IDX 0
+#define mmSEM_RESP_UVD_1 0x01ad
+#define mmSEM_RESP_UVD_1_BASE_IDX 0
+#define mmSEM_CID_REMAP_INDEX 0x01b0
+#define mmSEM_CID_REMAP_INDEX_BASE_IDX 0
+#define mmSEM_CID_REMAP_DATA 0x01b1
+#define mmSEM_CID_REMAP_DATA_BASE_IDX 0
+#define mmSEM_ATOMIC_OP_LUT 0x01b2
+#define mmSEM_ATOMIC_OP_LUT_BASE_IDX 0
+#define mmSEM_EDC_CONFIG 0x01b3
+#define mmSEM_EDC_CONFIG_BASE_IDX 0
+#define mmSEM_CHICKEN_BITS2 0x01b4
+#define mmSEM_CHICKEN_BITS2_BASE_IDX 0
+#define mmSEM_MMHUB_CNTL 0x01b5
+#define mmSEM_MMHUB_CNTL_BASE_IDX 0
+#define mmSEM_REGISTER_LAST_PART1 0x01bf
+#define mmSEM_REGISTER_LAST_PART1_BASE_IDX 0
+
+#endif
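
For orientation, a brief sketch of how these auto-generated definitions are typically consumed (a fragment, not driver code: 'osssys_base' and 'read_reg32' are hypothetical placeholders for the per-segment base table and MMIO read path, while the field macros come from the companion sh_mask header added below). The register's dword offset is the segment base selected by the _BASE_IDX value plus the mmIH_* offset, and a field is then isolated with the matching _MASK/__SHIFT pair.

	/* Sketch: locate IH_RB_CNTL and read back its RB_ENABLE field. */
	static uint32_t example_ih_ring_enabled(const uint32_t *osssys_base,
						uint32_t (*read_reg32)(uint32_t dword_offset))
	{
		uint32_t reg = osssys_base[mmIH_RB_CNTL_BASE_IDX] + mmIH_RB_CNTL;
		uint32_t val = read_reg32(reg);

		return (val & IH_RB_CNTL__RB_ENABLE_MASK) >> IH_RB_CNTL__RB_ENABLE__SHIFT;
	}
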
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h
new file mode 100644
index 000000000000..3ea83ea9ce3a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h
@@ -0,0 +1,1300 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_4_2_0_SH_MASK_HEADER
+#define _osssys_4_2_0_SH_MASK_HEADER
+
+
+// addressBlock: osssys_osssysdec
+//IH_VMID_0_LUT
+#define IH_VMID_0_LUT__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT
+#define IH_VMID_1_LUT__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT
+#define IH_VMID_2_LUT__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT
+#define IH_VMID_3_LUT__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT
+#define IH_VMID_4_LUT__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT
+#define IH_VMID_5_LUT__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT
+#define IH_VMID_6_LUT__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT
+#define IH_VMID_7_LUT__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT
+#define IH_VMID_8_LUT__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT
+#define IH_VMID_9_LUT__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT
+#define IH_VMID_10_LUT__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT
+#define IH_VMID_11_LUT__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT
+#define IH_VMID_12_LUT__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT
+#define IH_VMID_13_LUT__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT
+#define IH_VMID_14_LUT__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT
+#define IH_VMID_15_LUT__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_0_LUT_MM
+#define IH_VMID_0_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT_MM
+#define IH_VMID_1_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT_MM
+#define IH_VMID_2_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT_MM
+#define IH_VMID_3_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT_MM
+#define IH_VMID_4_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT_MM
+#define IH_VMID_5_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT_MM
+#define IH_VMID_6_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT_MM
+#define IH_VMID_7_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT_MM
+#define IH_VMID_8_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT_MM
+#define IH_VMID_9_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT_MM
+#define IH_VMID_10_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT_MM
+#define IH_VMID_11_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT_MM
+#define IH_VMID_12_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT_MM
+#define IH_VMID_13_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT_MM
+#define IH_VMID_14_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT_MM
+#define IH_VMID_15_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_COOKIE_0
+#define IH_COOKIE_0__CLIENT_ID__SHIFT 0x0
+#define IH_COOKIE_0__SOURCE_ID__SHIFT 0x8
+#define IH_COOKIE_0__RING_ID__SHIFT 0x10
+#define IH_COOKIE_0__VM_ID__SHIFT 0x18
+#define IH_COOKIE_0__RESERVED__SHIFT 0x1c
+#define IH_COOKIE_0__VMID_TYPE__SHIFT 0x1f
+#define IH_COOKIE_0__CLIENT_ID_MASK 0x000000FFL
+#define IH_COOKIE_0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_COOKIE_0__RING_ID_MASK 0x00FF0000L
+#define IH_COOKIE_0__VM_ID_MASK 0x0F000000L
+#define IH_COOKIE_0__RESERVED_MASK 0x70000000L
+#define IH_COOKIE_0__VMID_TYPE_MASK 0x80000000L
+//IH_COOKIE_1
+#define IH_COOKIE_1__TIMESTAMP_31_0__SHIFT 0x0
+#define IH_COOKIE_1__TIMESTAMP_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_2
+#define IH_COOKIE_2__TIMESTAMP_47_32__SHIFT 0x0
+#define IH_COOKIE_2__RESERVED__SHIFT 0x10
+#define IH_COOKIE_2__TIMESTAMP_SRC__SHIFT 0x1f
+#define IH_COOKIE_2__TIMESTAMP_47_32_MASK 0x0000FFFFL
+#define IH_COOKIE_2__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_2__TIMESTAMP_SRC_MASK 0x80000000L
+//IH_COOKIE_3
+#define IH_COOKIE_3__PAS_ID__SHIFT 0x0
+#define IH_COOKIE_3__RESERVED__SHIFT 0x10
+#define IH_COOKIE_3__PASID_SRC__SHIFT 0x1f
+#define IH_COOKIE_3__PAS_ID_MASK 0x0000FFFFL
+#define IH_COOKIE_3__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_3__PASID_SRC_MASK 0x80000000L
+//IH_COOKIE_4
+#define IH_COOKIE_4__CONTEXT_ID_31_0__SHIFT 0x0
+#define IH_COOKIE_4__CONTEXT_ID_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_5
+#define IH_COOKIE_5__CONTEXT_ID_63_32__SHIFT 0x0
+#define IH_COOKIE_5__CONTEXT_ID_63_32_MASK 0xFFFFFFFFL
+//IH_COOKIE_6
+#define IH_COOKIE_6__CONTEXT_ID_95_64__SHIFT 0x0
+#define IH_COOKIE_6__CONTEXT_ID_95_64_MASK 0xFFFFFFFFL
+//IH_COOKIE_7
+#define IH_COOKIE_7__CONTEXT_ID_128_96__SHIFT 0x0
+#define IH_COOKIE_7__CONTEXT_ID_128_96_MASK 0xFFFFFFFFL
+//IH_REGISTER_LAST_PART0
+#define IH_REGISTER_LAST_PART0__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART0__RESERVED_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_0
+#define SEM_REQ_INPUT_0__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_0__DATA_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_1
+#define SEM_REQ_INPUT_1__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_1__DATA_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_2
+#define SEM_REQ_INPUT_2__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_2__DATA_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_3
+#define SEM_REQ_INPUT_3__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_3__DATA_MASK 0xFFFFFFFFL
+//SEM_REGISTER_LAST_PART0
+#define SEM_REGISTER_LAST_PART0__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART0__RESERVED_MASK 0xFFFFFFFFL
+//IH_RB_CNTL
+#define IH_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE__SHIFT 0x7
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL__ENABLE_INTR__SHIFT 0x11
+#define IH_RB_CNTL__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL__RPTR_REARM__SHIFT 0x15
+#define IH_RB_CNTL__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL__ENABLE_INTR_MASK 0x00020000L
+#define IH_RB_CNTL__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL__RPTR_REARM_MASK 0x00200000L
+#define IH_RB_CNTL__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_BASE
+#define IH_RB_BASE__ADDR__SHIFT 0x0
+#define IH_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI
+#define IH_RB_BASE_HI__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI__ADDR_MASK 0x000000FFL
+//IH_RB_RPTR
+#define IH_RB_RPTR__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR
+#define IH_RB_WPTR__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_RB_WPTR_ADDR_HI
+#define IH_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define IH_RB_WPTR_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//IH_RB_WPTR_ADDR_LO
+#define IH_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define IH_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//IH_DOORBELL_RPTR
+#define IH_DOORBELL_RPTR__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR__ENABLE_MASK 0x10000000L
+//IH_RB_CNTL_RING1
+#define IH_RB_CNTL_RING1__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL_RING1__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL_RING1__RB_GPU_TS_ENABLE__SHIFT 0x7
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL_RING1__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL_RING1__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL_RING1__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL_RING1__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL_RING1__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL_RING1__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL_RING1__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL_RING1__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL_RING1__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL_RING1__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL_RING1__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL_RING1__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL_RING1__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_BASE_RING1
+#define IH_RB_BASE_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_RING1__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI_RING1
+#define IH_RB_BASE_HI_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI_RING1__ADDR_MASK 0x000000FFL
+//IH_RB_RPTR_RING1
+#define IH_RB_RPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR_RING1__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR_RING1
+#define IH_RB_WPTR_RING1__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR_RING1__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR_RING1__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_DOORBELL_RPTR_RING1
+#define IH_DOORBELL_RPTR_RING1__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR_RING1__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR_RING1__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR_RING1__ENABLE_MASK 0x10000000L
+//IH_RB_CNTL_RING2
+#define IH_RB_CNTL_RING2__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL_RING2__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL_RING2__RB_GPU_TS_ENABLE__SHIFT 0x7
+#define IH_RB_CNTL_RING2__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL_RING2__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL_RING2__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL_RING2__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL_RING2__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL_RING2__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL_RING2__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL_RING2__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL_RING2__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL_RING2__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL_RING2__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL_RING2__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL_RING2__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL_RING2__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL_RING2__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL_RING2__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL_RING2__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL_RING2__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL_RING2__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL_RING2__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL_RING2__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_BASE_RING2
+#define IH_RB_BASE_RING2__ADDR__SHIFT 0x0
+#define IH_RB_BASE_RING2__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI_RING2
+#define IH_RB_BASE_HI_RING2__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI_RING2__ADDR_MASK 0x000000FFL
+//IH_RB_RPTR_RING2
+#define IH_RB_RPTR_RING2__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR_RING2__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR_RING2
+#define IH_RB_WPTR_RING2__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR_RING2__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR_RING2__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR_RING2__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR_RING2__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR_RING2__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR_RING2__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR_RING2__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_DOORBELL_RPTR_RING2
+#define IH_DOORBELL_RPTR_RING2__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR_RING2__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR_RING2__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR_RING2__ENABLE_MASK 0x10000000L
+//IH_VERSION
+#define IH_VERSION__MINVER__SHIFT 0x0
+#define IH_VERSION__MAJVER__SHIFT 0x8
+#define IH_VERSION__REV__SHIFT 0x10
+#define IH_VERSION__MINVER_MASK 0x0000007FL
+#define IH_VERSION__MAJVER_MASK 0x00007F00L
+#define IH_VERSION__REV_MASK 0x003F0000L
+//IH_CNTL
+#define IH_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x0
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL__SHIFT 0x6
+#define IH_CNTL__IH_FIFO_HIGHWATER__SHIFT 0x8
+#define IH_CNTL__MC_WR_CLEAN_CNT__SHIFT 0x14
+#define IH_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x0000001FL
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL_MASK 0x000000C0L
+#define IH_CNTL__IH_FIFO_HIGHWATER_MASK 0x00007F00L
+#define IH_CNTL__MC_WR_CLEAN_CNT_MASK 0x01F00000L
+//IH_CNTL2
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT__SHIFT 0x0
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE__SHIFT 0x8
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT_MASK 0x0000001FL
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE_MASK 0x00000100L
+//IH_STATUS
+#define IH_STATUS__IDLE__SHIFT 0x0
+#define IH_STATUS__INPUT_IDLE__SHIFT 0x1
+#define IH_STATUS__BUFFER_IDLE__SHIFT 0x2
+#define IH_STATUS__RB_FULL__SHIFT 0x3
+#define IH_STATUS__RB_FULL_DRAIN__SHIFT 0x4
+#define IH_STATUS__RB_OVERFLOW__SHIFT 0x5
+#define IH_STATUS__MC_WR_IDLE__SHIFT 0x6
+#define IH_STATUS__MC_WR_STALL__SHIFT 0x7
+#define IH_STATUS__MC_WR_CLEAN_PENDING__SHIFT 0x8
+#define IH_STATUS__MC_WR_CLEAN_STALL__SHIFT 0x9
+#define IH_STATUS__BIF_INTERRUPT_LINE__SHIFT 0xa
+#define IH_STATUS__SWITCH_READY__SHIFT 0xb
+#define IH_STATUS__RB1_FULL__SHIFT 0xc
+#define IH_STATUS__RB1_FULL_DRAIN__SHIFT 0xd
+#define IH_STATUS__RB1_OVERFLOW__SHIFT 0xe
+#define IH_STATUS__RB2_FULL__SHIFT 0xf
+#define IH_STATUS__RB2_FULL_DRAIN__SHIFT 0x10
+#define IH_STATUS__RB2_OVERFLOW__SHIFT 0x11
+#define IH_STATUS__SELF_INT_GEN_IDLE__SHIFT 0x12
+#define IH_STATUS__IDLE_MASK 0x00000001L
+#define IH_STATUS__INPUT_IDLE_MASK 0x00000002L
+#define IH_STATUS__BUFFER_IDLE_MASK 0x00000004L
+#define IH_STATUS__RB_FULL_MASK 0x00000008L
+#define IH_STATUS__RB_FULL_DRAIN_MASK 0x00000010L
+#define IH_STATUS__RB_OVERFLOW_MASK 0x00000020L
+#define IH_STATUS__MC_WR_IDLE_MASK 0x00000040L
+#define IH_STATUS__MC_WR_STALL_MASK 0x00000080L
+#define IH_STATUS__MC_WR_CLEAN_PENDING_MASK 0x00000100L
+#define IH_STATUS__MC_WR_CLEAN_STALL_MASK 0x00000200L
+#define IH_STATUS__BIF_INTERRUPT_LINE_MASK 0x00000400L
+#define IH_STATUS__SWITCH_READY_MASK 0x00000800L
+#define IH_STATUS__RB1_FULL_MASK 0x00001000L
+#define IH_STATUS__RB1_FULL_DRAIN_MASK 0x00002000L
+#define IH_STATUS__RB1_OVERFLOW_MASK 0x00004000L
+#define IH_STATUS__RB2_FULL_MASK 0x00008000L
+#define IH_STATUS__RB2_FULL_DRAIN_MASK 0x00010000L
+#define IH_STATUS__RB2_OVERFLOW_MASK 0x00020000L
+#define IH_STATUS__SELF_INT_GEN_IDLE_MASK 0x00040000L
+//IH_PERFMON_CNTL
+#define IH_PERFMON_CNTL__ENABLE0__SHIFT 0x0
+#define IH_PERFMON_CNTL__CLEAR0__SHIFT 0x1
+#define IH_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define IH_PERFMON_CNTL__ENABLE1__SHIFT 0x10
+#define IH_PERFMON_CNTL__CLEAR1__SHIFT 0x11
+#define IH_PERFMON_CNTL__PERF_SEL1__SHIFT 0x12
+#define IH_PERFMON_CNTL__ENABLE0_MASK 0x00000001L
+#define IH_PERFMON_CNTL__CLEAR0_MASK 0x00000002L
+#define IH_PERFMON_CNTL__PERF_SEL0_MASK 0x000007FCL
+#define IH_PERFMON_CNTL__ENABLE1_MASK 0x00010000L
+#define IH_PERFMON_CNTL__CLEAR1_MASK 0x00020000L
+#define IH_PERFMON_CNTL__PERF_SEL1_MASK 0x07FC0000L
+//IH_PERFCOUNTER0_RESULT
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_PERFCOUNTER1_RESULT
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_31_0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_63_32
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_95_64
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_FIELD_CONTROL
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN__SHIFT 0x0
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN__SHIFT 0x1
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN__SHIFT 0x2
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN__SHIFT 0x3
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN__SHIFT 0x4
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN__SHIFT 0x5
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN__SHIFT 0x6
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN_MASK 0x00000001L
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN_MASK 0x00000002L
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN_MASK 0x00000004L
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN_MASK 0x00000008L
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN_MASK 0x00000010L
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN_MASK 0x00000020L
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN_MASK 0x00000040L
+//IH_DSM_MATCH_DATA_CONTROL
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE_MASK 0x0FFFFFFFL
+//IH_DSM_MATCH_FCN_ID
+#define IH_DSM_MATCH_FCN_ID__PF_VF__SHIFT 0x0
+#define IH_DSM_MATCH_FCN_ID__VF_ID__SHIFT 0x1
+#define IH_DSM_MATCH_FCN_ID__PF_VF_MASK 0x00000001L
+#define IH_DSM_MATCH_FCN_ID__VF_ID_MASK 0x0000001EL
+//IH_LIMIT_INT_RATE_CNTL
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE__SHIFT 0x0
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL__SHIFT 0x1
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD__SHIFT 0x5
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY__SHIFT 0x11
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT__SHIFT 0x15
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE_MASK 0x00000001L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL_MASK 0x0000001EL
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD_MASK 0x0000FFE0L
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY_MASK 0x001E0000L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT_MASK 0xFFE00000L
+//IH_VF_RB_STATUS
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS__RB_OVERFLOW_VF__SHIFT 0x10
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL
+#define IH_VF_RB_STATUS__RB_OVERFLOW_VF_MASK 0xFFFF0000L
+//IH_VF_RB_STATUS2
+#define IH_VF_RB_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS2__BIF_INTERRUPT_LINE_VF__SHIFT 0x10
+#define IH_VF_RB_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL
+#define IH_VF_RB_STATUS2__BIF_INTERRUPT_LINE_VF_MASK 0xFFFF0000L
+//IH_VF_RB1_STATUS
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS__RB_OVERFLOW_VF__SHIFT 0x10
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL
+#define IH_VF_RB1_STATUS__RB_OVERFLOW_VF_MASK 0xFFFF0000L
+//IH_VF_RB1_STATUS2
+#define IH_VF_RB1_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL
+//IH_VF_RB2_STATUS
+#define IH_VF_RB2_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB2_STATUS__RB_OVERFLOW_VF__SHIFT 0x10
+#define IH_VF_RB2_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL
+#define IH_VF_RB2_STATUS__RB_OVERFLOW_VF_MASK 0xFFFF0000L
+//IH_VF_RB2_STATUS2
+#define IH_VF_RB2_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB2_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL
+//IH_INT_FLOOD_CNTL
+#define IH_INT_FLOOD_CNTL__HIGHWATER__SHIFT 0x0
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE__SHIFT 0x3
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS__SHIFT 0x4
+#define IH_INT_FLOOD_CNTL__HIGHWATER_MASK 0x00000007L
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE_MASK 0x00000008L
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS_MASK 0x00000010L
+//IH_RB0_INT_FLOOD_STATUS
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_RB1_INT_FLOOD_STATUS
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_RB2_INT_FLOOD_STATUS
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_INT_FLOOD_STATUS
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT__SHIFT 0x0
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID__SHIFT 0x8
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID__SHIFT 0x10
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID__SHIFT 0x18
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF__SHIFT 0x1c
+#define IH_INT_FLOOD_STATUS__INT_DROPPED__SHIFT 0x1e
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT_MASK 0x000000FFL
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID_MASK 0x0000FF00L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID_MASK 0x00FF0000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID_MASK 0x0F000000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_MASK 0x10000000L
+#define IH_INT_FLOOD_STATUS__INT_DROPPED_MASK 0x40000000L
+//IH_STORM_CLIENT_LIST_CNTL
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT__SHIFT 0x1
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT__SHIFT 0x2
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT__SHIFT 0x3
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT__SHIFT 0x4
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT__SHIFT 0x5
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT__SHIFT 0x6
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT__SHIFT 0x7
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT__SHIFT 0x8
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT__SHIFT 0x9
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT__SHIFT 0xa
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT__SHIFT 0xb
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT__SHIFT 0xc
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT__SHIFT 0xd
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT__SHIFT 0xe
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT__SHIFT 0xf
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT__SHIFT 0x10
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT__SHIFT 0x11
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT__SHIFT 0x12
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT__SHIFT 0x13
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT__SHIFT 0x14
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT__SHIFT 0x15
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT__SHIFT 0x16
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT__SHIFT 0x17
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT__SHIFT 0x18
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT__SHIFT 0x19
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT__SHIFT 0x1a
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT__SHIFT 0x1b
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT__SHIFT 0x1c
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT__SHIFT 0x1d
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT__SHIFT 0x1e
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT__SHIFT 0x1f
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT_MASK 0x00000002L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT_MASK 0x00000004L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT_MASK 0x00000008L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT_MASK 0x00000010L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT_MASK 0x00000020L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT_MASK 0x00000040L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT_MASK 0x00000080L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT_MASK 0x00000100L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT_MASK 0x00000200L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT_MASK 0x00000400L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT_MASK 0x00000800L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT_MASK 0x00001000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT_MASK 0x00002000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT_MASK 0x00004000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT_MASK 0x00008000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT_MASK 0x00010000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT_MASK 0x00020000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT_MASK 0x00040000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT_MASK 0x00080000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT_MASK 0x00100000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT_MASK 0x00200000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT_MASK 0x00400000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT_MASK 0x00800000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT_MASK 0x01000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT_MASK 0x02000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT_MASK 0x04000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT_MASK 0x08000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT_MASK 0x10000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT_MASK 0x20000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L
+//IH_CLK_CTRL
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//IH_INT_FLAGS
+#define IH_INT_FLAGS__CLIENT_0_FLAG__SHIFT 0x0
+#define IH_INT_FLAGS__CLIENT_1_FLAG__SHIFT 0x1
+#define IH_INT_FLAGS__CLIENT_2_FLAG__SHIFT 0x2
+#define IH_INT_FLAGS__CLIENT_3_FLAG__SHIFT 0x3
+#define IH_INT_FLAGS__CLIENT_4_FLAG__SHIFT 0x4
+#define IH_INT_FLAGS__CLIENT_5_FLAG__SHIFT 0x5
+#define IH_INT_FLAGS__CLIENT_6_FLAG__SHIFT 0x6
+#define IH_INT_FLAGS__CLIENT_7_FLAG__SHIFT 0x7
+#define IH_INT_FLAGS__CLIENT_8_FLAG__SHIFT 0x8
+#define IH_INT_FLAGS__CLIENT_9_FLAG__SHIFT 0x9
+#define IH_INT_FLAGS__CLIENT_10_FLAG__SHIFT 0xa
+#define IH_INT_FLAGS__CLIENT_11_FLAG__SHIFT 0xb
+#define IH_INT_FLAGS__CLIENT_12_FLAG__SHIFT 0xc
+#define IH_INT_FLAGS__CLIENT_13_FLAG__SHIFT 0xd
+#define IH_INT_FLAGS__CLIENT_14_FLAG__SHIFT 0xe
+#define IH_INT_FLAGS__CLIENT_15_FLAG__SHIFT 0xf
+#define IH_INT_FLAGS__CLIENT_16_FLAG__SHIFT 0x10
+#define IH_INT_FLAGS__CLIENT_17_FLAG__SHIFT 0x11
+#define IH_INT_FLAGS__CLIENT_18_FLAG__SHIFT 0x12
+#define IH_INT_FLAGS__CLIENT_19_FLAG__SHIFT 0x13
+#define IH_INT_FLAGS__CLIENT_20_FLAG__SHIFT 0x14
+#define IH_INT_FLAGS__CLIENT_21_FLAG__SHIFT 0x15
+#define IH_INT_FLAGS__CLIENT_22_FLAG__SHIFT 0x16
+#define IH_INT_FLAGS__CLIENT_23_FLAG__SHIFT 0x17
+#define IH_INT_FLAGS__CLIENT_24_FLAG__SHIFT 0x18
+#define IH_INT_FLAGS__CLIENT_25_FLAG__SHIFT 0x19
+#define IH_INT_FLAGS__CLIENT_26_FLAG__SHIFT 0x1a
+#define IH_INT_FLAGS__CLIENT_27_FLAG__SHIFT 0x1b
+#define IH_INT_FLAGS__CLIENT_28_FLAG__SHIFT 0x1c
+#define IH_INT_FLAGS__CLIENT_29_FLAG__SHIFT 0x1d
+#define IH_INT_FLAGS__CLIENT_30_FLAG__SHIFT 0x1e
+#define IH_INT_FLAGS__CLIENT_31_FLAG__SHIFT 0x1f
+#define IH_INT_FLAGS__CLIENT_0_FLAG_MASK 0x00000001L
+#define IH_INT_FLAGS__CLIENT_1_FLAG_MASK 0x00000002L
+#define IH_INT_FLAGS__CLIENT_2_FLAG_MASK 0x00000004L
+#define IH_INT_FLAGS__CLIENT_3_FLAG_MASK 0x00000008L
+#define IH_INT_FLAGS__CLIENT_4_FLAG_MASK 0x00000010L
+#define IH_INT_FLAGS__CLIENT_5_FLAG_MASK 0x00000020L
+#define IH_INT_FLAGS__CLIENT_6_FLAG_MASK 0x00000040L
+#define IH_INT_FLAGS__CLIENT_7_FLAG_MASK 0x00000080L
+#define IH_INT_FLAGS__CLIENT_8_FLAG_MASK 0x00000100L
+#define IH_INT_FLAGS__CLIENT_9_FLAG_MASK 0x00000200L
+#define IH_INT_FLAGS__CLIENT_10_FLAG_MASK 0x00000400L
+#define IH_INT_FLAGS__CLIENT_11_FLAG_MASK 0x00000800L
+#define IH_INT_FLAGS__CLIENT_12_FLAG_MASK 0x00001000L
+#define IH_INT_FLAGS__CLIENT_13_FLAG_MASK 0x00002000L
+#define IH_INT_FLAGS__CLIENT_14_FLAG_MASK 0x00004000L
+#define IH_INT_FLAGS__CLIENT_15_FLAG_MASK 0x00008000L
+#define IH_INT_FLAGS__CLIENT_16_FLAG_MASK 0x00010000L
+#define IH_INT_FLAGS__CLIENT_17_FLAG_MASK 0x00020000L
+#define IH_INT_FLAGS__CLIENT_18_FLAG_MASK 0x00040000L
+#define IH_INT_FLAGS__CLIENT_19_FLAG_MASK 0x00080000L
+#define IH_INT_FLAGS__CLIENT_20_FLAG_MASK 0x00100000L
+#define IH_INT_FLAGS__CLIENT_21_FLAG_MASK 0x00200000L
+#define IH_INT_FLAGS__CLIENT_22_FLAG_MASK 0x00400000L
+#define IH_INT_FLAGS__CLIENT_23_FLAG_MASK 0x00800000L
+#define IH_INT_FLAGS__CLIENT_24_FLAG_MASK 0x01000000L
+#define IH_INT_FLAGS__CLIENT_25_FLAG_MASK 0x02000000L
+#define IH_INT_FLAGS__CLIENT_26_FLAG_MASK 0x04000000L
+#define IH_INT_FLAGS__CLIENT_27_FLAG_MASK 0x08000000L
+#define IH_INT_FLAGS__CLIENT_28_FLAG_MASK 0x10000000L
+#define IH_INT_FLAGS__CLIENT_29_FLAG_MASK 0x20000000L
+#define IH_INT_FLAGS__CLIENT_30_FLAG_MASK 0x40000000L
+#define IH_INT_FLAGS__CLIENT_31_FLAG_MASK 0x80000000L
+//IH_LAST_INT_INFO0
+#define IH_LAST_INT_INFO0__CLIENT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO0__SOURCE_ID__SHIFT 0x8
+#define IH_LAST_INT_INFO0__RING_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO0__VM_ID__SHIFT 0x18
+#define IH_LAST_INT_INFO0__VMID_TYPE__SHIFT 0x1f
+#define IH_LAST_INT_INFO0__CLIENT_ID_MASK 0x000000FFL
+#define IH_LAST_INT_INFO0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_LAST_INT_INFO0__RING_ID_MASK 0x00FF0000L
+#define IH_LAST_INT_INFO0__VM_ID_MASK 0x0F000000L
+#define IH_LAST_INT_INFO0__VMID_TYPE_MASK 0x80000000L
+//IH_LAST_INT_INFO1
+#define IH_LAST_INT_INFO1__CONTEXT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO1__CONTEXT_ID_MASK 0xFFFFFFFFL
+//IH_LAST_INT_INFO2
+#define IH_LAST_INT_INFO2__PAS_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO2__VF_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO2__VF__SHIFT 0x14
+#define IH_LAST_INT_INFO2__PAS_ID_MASK 0x0000FFFFL
+#define IH_LAST_INT_INFO2__VF_ID_MASK 0x000F0000L
+#define IH_LAST_INT_INFO2__VF_MASK 0x00100000L
+//IH_SCRATCH
+#define IH_SCRATCH__DATA__SHIFT 0x0
+#define IH_SCRATCH__DATA_MASK 0xFFFFFFFFL
+//IH_CLIENT_CREDIT_ERROR
+#define IH_CLIENT_CREDIT_ERROR__CLEAR__SHIFT 0x0
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR__SHIFT 0x1
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR__SHIFT 0x2
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR__SHIFT 0x3
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR__SHIFT 0x4
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR__SHIFT 0x5
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR__SHIFT 0x6
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR__SHIFT 0x7
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR__SHIFT 0x8
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR__SHIFT 0x9
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR__SHIFT 0xa
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR__SHIFT 0xb
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR__SHIFT 0xc
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR__SHIFT 0xd
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR__SHIFT 0xe
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR__SHIFT 0xf
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR__SHIFT 0x10
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR__SHIFT 0x11
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR__SHIFT 0x12
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR__SHIFT 0x13
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR__SHIFT 0x14
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR__SHIFT 0x15
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR__SHIFT 0x16
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR__SHIFT 0x17
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR__SHIFT 0x18
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR__SHIFT 0x19
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR__SHIFT 0x1a
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR__SHIFT 0x1b
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR__SHIFT 0x1c
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR__SHIFT 0x1d
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR__SHIFT 0x1e
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR__SHIFT 0x1f
+#define IH_CLIENT_CREDIT_ERROR__CLEAR_MASK 0x00000001L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR_MASK 0x00000002L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR_MASK 0x00000004L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR_MASK 0x00000008L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR_MASK 0x00000010L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR_MASK 0x00000020L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR_MASK 0x00000040L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR_MASK 0x00000080L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR_MASK 0x00000100L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR_MASK 0x00000200L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR_MASK 0x00000400L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR_MASK 0x00000800L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR_MASK 0x00001000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR_MASK 0x00002000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR_MASK 0x00004000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR_MASK 0x00008000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR_MASK 0x00010000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR_MASK 0x00020000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR_MASK 0x00040000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR_MASK 0x00080000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR_MASK 0x00100000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR_MASK 0x00200000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR_MASK 0x00400000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR_MASK 0x00800000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR_MASK 0x01000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR_MASK 0x02000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR_MASK 0x04000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR_MASK 0x08000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR_MASK 0x10000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR_MASK 0x20000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR_MASK 0x40000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR_MASK 0x80000000L
+//IH_GPU_IOV_VIOLATION_LOG
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
+#define IH_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT 0x14
+#define IH_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID_MASK 0x00F00000L
+#define IH_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
+//IH_COOKIE_REC_VIOLATION_LOG
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID__SHIFT 0x10
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID_MASK 0x00FF0000L
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
+//IH_CREDIT_STATUS
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED__SHIFT 0x1
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED__SHIFT 0x2
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED__SHIFT 0x3
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED__SHIFT 0x4
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED__SHIFT 0x5
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED__SHIFT 0x6
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED__SHIFT 0x7
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED__SHIFT 0x8
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED__SHIFT 0x9
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED__SHIFT 0xa
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED__SHIFT 0xb
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED__SHIFT 0xc
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED__SHIFT 0xd
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED__SHIFT 0xe
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED__SHIFT 0xf
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED__SHIFT 0x10
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED__SHIFT 0x11
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED__SHIFT 0x12
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED__SHIFT 0x13
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED__SHIFT 0x14
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED__SHIFT 0x15
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED__SHIFT 0x16
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED__SHIFT 0x17
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED__SHIFT 0x18
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED__SHIFT 0x19
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED__SHIFT 0x1a
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED__SHIFT 0x1b
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED__SHIFT 0x1c
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED__SHIFT 0x1d
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED__SHIFT 0x1e
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED__SHIFT 0x1f
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED_MASK 0x00000002L
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED_MASK 0x00000004L
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED_MASK 0x00000008L
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED_MASK 0x00000010L
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED_MASK 0x00000020L
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED_MASK 0x00000040L
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED_MASK 0x00000080L
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED_MASK 0x00000100L
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED_MASK 0x00000200L
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED_MASK 0x00000400L
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED_MASK 0x00000800L
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED_MASK 0x00001000L
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED_MASK 0x00002000L
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED_MASK 0x00004000L
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED_MASK 0x00008000L
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED_MASK 0x00010000L
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED_MASK 0x00020000L
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED_MASK 0x00040000L
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED_MASK 0x00080000L
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED_MASK 0x00100000L
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED_MASK 0x00200000L
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED_MASK 0x00400000L
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED_MASK 0x00800000L
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED_MASK 0x01000000L
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED_MASK 0x02000000L
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED_MASK 0x04000000L
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED_MASK 0x08000000L
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED_MASK 0x10000000L
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED_MASK 0x20000000L
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED_MASK 0x40000000L
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED_MASK 0x80000000L
+//IH_MMHUB_ERROR
+#define IH_MMHUB_ERROR__IH_BRESP_01__SHIFT 0x1
+#define IH_MMHUB_ERROR__IH_BRESP_10__SHIFT 0x2
+#define IH_MMHUB_ERROR__IH_BRESP_11__SHIFT 0x3
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01__SHIFT 0x5
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10__SHIFT 0x6
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11__SHIFT 0x7
+#define IH_MMHUB_ERROR__IH_BRESP_01_MASK 0x00000002L
+#define IH_MMHUB_ERROR__IH_BRESP_10_MASK 0x00000004L
+#define IH_MMHUB_ERROR__IH_BRESP_11_MASK 0x00000008L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01_MASK 0x00000020L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10_MASK 0x00000040L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11_MASK 0x00000080L
+//IH_MEM_POWER_CTRL
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN__SHIFT 0x1
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN__SHIFT 0x2
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN__SHIFT 0x3
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT 0xe
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN_MASK 0x00000002L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN_MASK 0x00000004L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN_MASK 0x00000008L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_LS_ENTER_DELAY_MASK 0x0000C000L
+//IH_REGISTER_LAST_PART2
+#define IH_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//SEM_CLK_CTRL
+#define SEM_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SEM_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SEM_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SEM_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SEM_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SEM_CLK_CTRL__MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
+#define SEM_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SEM_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SEM_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SEM_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define SEM_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define SEM_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SEM_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SEM_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SEM_CLK_CTRL__MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SEM_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define SEM_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//SEM_UTC_CREDIT
+#define SEM_UTC_CREDIT__UTCL2_CREDIT__SHIFT 0x0
+#define SEM_UTC_CREDIT__WATERMARK__SHIFT 0x8
+#define SEM_UTC_CREDIT__UTCL2_CREDIT_MASK 0x0000001FL
+#define SEM_UTC_CREDIT__WATERMARK_MASK 0x00000F00L
+//SEM_UTC_CONFIG
+#define SEM_UTC_CONFIG__USE_MTYPE__SHIFT 0x0
+#define SEM_UTC_CONFIG__FORCE_SNOOP__SHIFT 0x3
+#define SEM_UTC_CONFIG__FORCE_GCC__SHIFT 0x4
+#define SEM_UTC_CONFIG__USE_PT_SNOOP__SHIFT 0x5
+#define SEM_UTC_CONFIG__USE_MTYPE_MASK 0x00000007L
+#define SEM_UTC_CONFIG__FORCE_SNOOP_MASK 0x00000008L
+#define SEM_UTC_CONFIG__FORCE_GCC_MASK 0x00000010L
+#define SEM_UTC_CONFIG__USE_PT_SNOOP_MASK 0x00000020L
+//SEM_UTCL2_TRAN_EN_LUT
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA0_UTCL2_EN__SHIFT 0x0
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA1_UTCL2_EN__SHIFT 0x1
+#define SEM_UTCL2_TRAN_EN_LUT__UVD_UTCL2_EN__SHIFT 0x2
+#define SEM_UTCL2_TRAN_EN_LUT__VCE0_UTCL2_EN__SHIFT 0x3
+#define SEM_UTCL2_TRAN_EN_LUT__ACP_UTCL2_EN__SHIFT 0x4
+#define SEM_UTCL2_TRAN_EN_LUT__ISP_UTCL2_EN__SHIFT 0x5
+#define SEM_UTCL2_TRAN_EN_LUT__VCE1_UTCL2_EN__SHIFT 0x6
+#define SEM_UTCL2_TRAN_EN_LUT__VP8_UTCL2_EN__SHIFT 0x7
+#define SEM_UTCL2_TRAN_EN_LUT__UVD1_UTCL2_EN__SHIFT 0x8
+#define SEM_UTCL2_TRAN_EN_LUT__RESERVED__SHIFT 0x9
+#define SEM_UTCL2_TRAN_EN_LUT__CP_UTCL2_EN__SHIFT 0x1f
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA0_UTCL2_EN_MASK 0x00000001L
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA1_UTCL2_EN_MASK 0x00000002L
+#define SEM_UTCL2_TRAN_EN_LUT__UVD_UTCL2_EN_MASK 0x00000004L
+#define SEM_UTCL2_TRAN_EN_LUT__VCE0_UTCL2_EN_MASK 0x00000008L
+#define SEM_UTCL2_TRAN_EN_LUT__ACP_UTCL2_EN_MASK 0x00000010L
+#define SEM_UTCL2_TRAN_EN_LUT__ISP_UTCL2_EN_MASK 0x00000020L
+#define SEM_UTCL2_TRAN_EN_LUT__VCE1_UTCL2_EN_MASK 0x00000040L
+#define SEM_UTCL2_TRAN_EN_LUT__VP8_UTCL2_EN_MASK 0x00000080L
+#define SEM_UTCL2_TRAN_EN_LUT__UVD1_UTCL2_EN_MASK 0x00000100L
+#define SEM_UTCL2_TRAN_EN_LUT__RESERVED_MASK 0x7FFFFE00L
+#define SEM_UTCL2_TRAN_EN_LUT__CP_UTCL2_EN_MASK 0x80000000L
+//SEM_MCIF_CONFIG
+#define SEM_MCIF_CONFIG__MC_REQ_SWAP__SHIFT 0x0
+#define SEM_MCIF_CONFIG__MC_WRREQ_CREDIT__SHIFT 0x2
+#define SEM_MCIF_CONFIG__MC_RDREQ_CREDIT__SHIFT 0x8
+#define SEM_MCIF_CONFIG__MC_REQ_SWAP_MASK 0x00000003L
+#define SEM_MCIF_CONFIG__MC_WRREQ_CREDIT_MASK 0x000000FCL
+#define SEM_MCIF_CONFIG__MC_RDREQ_CREDIT_MASK 0x00003F00L
+//SEM_PERFMON_CNTL
+#define SEM_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
+#define SEM_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
+#define SEM_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define SEM_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
+#define SEM_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
+#define SEM_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
+#define SEM_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
+#define SEM_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
+#define SEM_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
+#define SEM_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
+#define SEM_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
+#define SEM_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
+//SEM_PERFCOUNTER0_RESULT
+#define SEM_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define SEM_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SEM_PERFCOUNTER1_RESULT
+#define SEM_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define SEM_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SEM_STATUS
+#define SEM_STATUS__SEM_IDLE__SHIFT 0x0
+#define SEM_STATUS__SEM_INTERNAL_IDLE__SHIFT 0x1
+#define SEM_STATUS__MC_RDREQ_FIFO_FULL__SHIFT 0x2
+#define SEM_STATUS__MC_WRREQ_FIFO_FULL__SHIFT 0x3
+#define SEM_STATUS__WRITE1_FIFO_FULL__SHIFT 0x4
+#define SEM_STATUS__CHECK0_FIFO_FULL__SHIFT 0x5
+#define SEM_STATUS__MC_RDREQ_PENDING__SHIFT 0x6
+#define SEM_STATUS__MC_WRREQ_PENDING__SHIFT 0x7
+#define SEM_STATUS__SDMA0_MAILBOX_PENDING__SHIFT 0x8
+#define SEM_STATUS__SDMA1_MAILBOX_PENDING__SHIFT 0x9
+#define SEM_STATUS__UVD_MAILBOX_PENDING__SHIFT 0xa
+#define SEM_STATUS__VCE_MAILBOX_PENDING__SHIFT 0xb
+#define SEM_STATUS__CPG1_MAILBOX_PENDING__SHIFT 0xc
+#define SEM_STATUS__CPG2_MAILBOX_PENDING__SHIFT 0xd
+#define SEM_STATUS__VCE1_MAILBOX_PENDING__SHIFT 0xe
+#define SEM_STATUS__ATC_REQ_PENDING__SHIFT 0xf
+#define SEM_STATUS__OUTSTANDING_CLEAN__SHIFT 0x10
+#define SEM_STATUS__INVREQ_FLUSH_VF_MISMATCH__SHIFT 0x11
+#define SEM_STATUS__INVREQ_NONFLUSH_VF_MISMATCH__SHIFT 0x12
+#define SEM_STATUS__INVREQ_CNT_IDLE__SHIFT 0x13
+#define SEM_STATUS__ENTRYLIST_IDLE__SHIFT 0x14
+#define SEM_STATUS__MIF_IDLE__SHIFT 0x15
+#define SEM_STATUS__REGISTER_IDLE__SHIFT 0x16
+#define SEM_STATUS__ATCL2_INVREQ_IDLE__SHIFT 0x17
+#define SEM_STATUS__UVD1_MAILBOX_PENDING__SHIFT 0x18
+#define SEM_STATUS__SWITCH_READY__SHIFT 0x1f
+#define SEM_STATUS__SEM_IDLE_MASK 0x00000001L
+#define SEM_STATUS__SEM_INTERNAL_IDLE_MASK 0x00000002L
+#define SEM_STATUS__MC_RDREQ_FIFO_FULL_MASK 0x00000004L
+#define SEM_STATUS__MC_WRREQ_FIFO_FULL_MASK 0x00000008L
+#define SEM_STATUS__WRITE1_FIFO_FULL_MASK 0x00000010L
+#define SEM_STATUS__CHECK0_FIFO_FULL_MASK 0x00000020L
+#define SEM_STATUS__MC_RDREQ_PENDING_MASK 0x00000040L
+#define SEM_STATUS__MC_WRREQ_PENDING_MASK 0x00000080L
+#define SEM_STATUS__SDMA0_MAILBOX_PENDING_MASK 0x00000100L
+#define SEM_STATUS__SDMA1_MAILBOX_PENDING_MASK 0x00000200L
+#define SEM_STATUS__UVD_MAILBOX_PENDING_MASK 0x00000400L
+#define SEM_STATUS__VCE_MAILBOX_PENDING_MASK 0x00000800L
+#define SEM_STATUS__CPG1_MAILBOX_PENDING_MASK 0x00001000L
+#define SEM_STATUS__CPG2_MAILBOX_PENDING_MASK 0x00002000L
+#define SEM_STATUS__VCE1_MAILBOX_PENDING_MASK 0x00004000L
+#define SEM_STATUS__ATC_REQ_PENDING_MASK 0x00008000L
+#define SEM_STATUS__OUTSTANDING_CLEAN_MASK 0x00010000L
+#define SEM_STATUS__INVREQ_FLUSH_VF_MISMATCH_MASK 0x00020000L
+#define SEM_STATUS__INVREQ_NONFLUSH_VF_MISMATCH_MASK 0x00040000L
+#define SEM_STATUS__INVREQ_CNT_IDLE_MASK 0x00080000L
+#define SEM_STATUS__ENTRYLIST_IDLE_MASK 0x00100000L
+#define SEM_STATUS__MIF_IDLE_MASK 0x00200000L
+#define SEM_STATUS__REGISTER_IDLE_MASK 0x00400000L
+#define SEM_STATUS__ATCL2_INVREQ_IDLE_MASK 0x00800000L
+#define SEM_STATUS__UVD1_MAILBOX_PENDING_MASK 0x01000000L
+#define SEM_STATUS__SWITCH_READY_MASK 0x80000000L
+//SEM_MAILBOX_CLIENTCONFIG
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0__SHIFT 0x0
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1__SHIFT 0x3
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT2__SHIFT 0x6
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT3__SHIFT 0x9
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA_CLIENT0__SHIFT 0xc
+#define SEM_MAILBOX_CLIENTCONFIG__UVD_CLIENT0__SHIFT 0xf
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA1_CLIENT0__SHIFT 0x12
+#define SEM_MAILBOX_CLIENTCONFIG__VCE_CLIENT0__SHIFT 0x15
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK 0x00000007L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1_MASK 0x00000038L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT2_MASK 0x000001C0L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT3_MASK 0x00000E00L
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA_CLIENT0_MASK 0x00007000L
+#define SEM_MAILBOX_CLIENTCONFIG__UVD_CLIENT0_MASK 0x00038000L
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA1_CLIENT0_MASK 0x001C0000L
+#define SEM_MAILBOX_CLIENTCONFIG__VCE_CLIENT0_MASK 0x00E00000L
+//SEM_MAILBOX
+#define SEM_MAILBOX__HOSTPORT__SHIFT 0x0
+#define SEM_MAILBOX__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX__HOSTPORT_MASK 0x0000FFFFL
+#define SEM_MAILBOX__RESERVED_MASK 0xFFFF0000L
+//SEM_MAILBOX_CONTROL
+#define SEM_MAILBOX_CONTROL__HOSTPORT_ENABLE__SHIFT 0x0
+#define SEM_MAILBOX_CONTROL__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX_CONTROL__HOSTPORT_ENABLE_MASK 0x0000FFFFL
+#define SEM_MAILBOX_CONTROL__RESERVED_MASK 0xFFFF0000L
+//SEM_CHICKEN_BITS
+#define SEM_CHICKEN_BITS__VMID_PIPELINE_EN__SHIFT 0x0
+#define SEM_CHICKEN_BITS__ENTRY_PIPELINE_EN__SHIFT 0x1
+#define SEM_CHICKEN_BITS__CHECK_COUNTER_EN__SHIFT 0x2
+#define SEM_CHICKEN_BITS__ECC_BEHAVIOR__SHIFT 0x3
+#define SEM_CHICKEN_BITS__PHY_TRAN_EN__SHIFT 0x6
+#define SEM_CHICKEN_BITS__ADDR_CMP_UNTRAN_EN__SHIFT 0x7
+#define SEM_CHICKEN_BITS__IDLE_COUNTER_INDEX__SHIFT 0x8
+#define SEM_CHICKEN_BITS__OUTSTANDING_CLEAN_COUNTER_INDEX__SHIFT 0xa
+#define SEM_CHICKEN_BITS__ATCL2_BUS_ID__SHIFT 0xc
+#define SEM_CHICKEN_BITS__ATOMIC_EN__SHIFT 0xe
+#define SEM_CHICKEN_BITS__EXTERNAL_ATOMIC_CHECK__SHIFT 0xf
+#define SEM_CHICKEN_BITS__CLEAR_MAILBOX__SHIFT 0x10
+#define SEM_CHICKEN_BITS__INVACK_AFTER_OUTSTANDING_CLEAN__SHIFT 0x12
+#define SEM_CHICKEN_BITS__UTC_TAG_CONFLICT_CHECK__SHIFT 0x13
+#define SEM_CHICKEN_BITS__VMID_PIPELINE_EN_MASK 0x00000001L
+#define SEM_CHICKEN_BITS__ENTRY_PIPELINE_EN_MASK 0x00000002L
+#define SEM_CHICKEN_BITS__CHECK_COUNTER_EN_MASK 0x00000004L
+#define SEM_CHICKEN_BITS__ECC_BEHAVIOR_MASK 0x00000018L
+#define SEM_CHICKEN_BITS__PHY_TRAN_EN_MASK 0x00000040L
+#define SEM_CHICKEN_BITS__ADDR_CMP_UNTRAN_EN_MASK 0x00000080L
+#define SEM_CHICKEN_BITS__IDLE_COUNTER_INDEX_MASK 0x00000300L
+#define SEM_CHICKEN_BITS__OUTSTANDING_CLEAN_COUNTER_INDEX_MASK 0x00000C00L
+#define SEM_CHICKEN_BITS__ATCL2_BUS_ID_MASK 0x00003000L
+#define SEM_CHICKEN_BITS__ATOMIC_EN_MASK 0x00004000L
+#define SEM_CHICKEN_BITS__EXTERNAL_ATOMIC_CHECK_MASK 0x00008000L
+#define SEM_CHICKEN_BITS__CLEAR_MAILBOX_MASK 0x00030000L
+#define SEM_CHICKEN_BITS__INVACK_AFTER_OUTSTANDING_CLEAN_MASK 0x00040000L
+#define SEM_CHICKEN_BITS__UTC_TAG_CONFLICT_CHECK_MASK 0x00080000L
+//SEM_MAILBOX_CLIENTCONFIG_EXTRA
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__VCE1_CLIENT0__SHIFT 0x0
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__UVD1_CLIENT0__SHIFT 0x4
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__VCE1_CLIENT0_MASK 0x0000000FL
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__UVD1_CLIENT0_MASK 0x000000F0L
+//SEM_GPU_IOV_VIOLATION_LOG
+#define SEM_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SEM_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SEM_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SEM_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
+#define SEM_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define SEM_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT 0x14
+#define SEM_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
+#define SEM_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SEM_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SEM_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define SEM_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
+#define SEM_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define SEM_GPU_IOV_VIOLATION_LOG__VF_ID_MASK 0x00F00000L
+#define SEM_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
+//SEM_OUTSTANDING_THRESHOLD
+#define SEM_OUTSTANDING_THRESHOLD__VALUE__SHIFT 0x0
+#define SEM_OUTSTANDING_THRESHOLD__VALUE_MASK 0x000000FFL
+//SEM_MEM_POWER_CTRL
+#define SEM_MEM_POWER_CTRL__MEM_POWER_CTRL_EN__SHIFT 0x0
+#define SEM_MEM_POWER_CTRL__MEM_POWER_LS_EN__SHIFT 0x1
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DS_EN__SHIFT 0x2
+#define SEM_MEM_POWER_CTRL__MEM_POWER_SD_EN__SHIFT 0x3
+#define SEM_MEM_POWER_CTRL__MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define SEM_MEM_POWER_CTRL__MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT 0xe
+#define SEM_MEM_POWER_CTRL__MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_LS_EN_MASK 0x00000002L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DS_EN_MASK 0x00000004L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_SD_EN_MASK 0x00000008L
+#define SEM_MEM_POWER_CTRL__MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DOWN_LS_ENTER_DELAY_MASK 0x0000C000L
+//SEM_REGISTER_LAST_PART2
+#define SEM_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//IH_ACTIVE_FCN_ID
+#define IH_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define IH_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define IH_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define IH_ACTIVE_FCN_ID__VF_ID_MASK 0x0000000FL
+#define IH_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define IH_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//IH_VIRT_RESET_REQ
+#define IH_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define IH_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define IH_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define IH_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//IH_CLIENT_CFG
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000001FL
+//IH_CLIENT_CFG_INDEX
+#define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
+#define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL
+//IH_CLIENT_CFG_DATA
+#define IH_CLIENT_CFG_DATA__CREDIT_RETURN_ADDR__SHIFT 0x0
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE__SHIFT 0x12
+#define IH_CLIENT_CFG_DATA__RING_ID__SHIFT 0x14
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT__SHIFT 0x16
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID__SHIFT 0x18
+#define IH_CLIENT_CFG_DATA__CREDIT_RETURN_ADDR_MASK 0x0003FFFFL
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE_MASK 0x000C0000L
+#define IH_CLIENT_CFG_DATA__RING_ID_MASK 0x00300000L
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT_MASK 0x00C00000L
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID_MASK 0x01000000L
+//IH_CID_REMAP_INDEX
+#define IH_CID_REMAP_INDEX__INDEX__SHIFT 0x0
+#define IH_CID_REMAP_INDEX__INDEX_MASK 0x00000003L
+//IH_CID_REMAP_DATA
+#define IH_CID_REMAP_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_CID_REMAP_DATA__INITIATOR_ID__SHIFT 0x8
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT 0x10
+#define IH_CID_REMAP_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_CID_REMAP_DATA__INITIATOR_ID_MASK 0x0000FF00L
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK 0x00FF0000L
+//IH_CHICKEN
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT 0x0
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE__SHIFT 0x3
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE__SHIFT 0x4
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE_MASK 0x00000001L
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE_MASK 0x00000008L
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE_MASK 0x00000010L
+//IH_MMHUB_CNTL
+#define IH_MMHUB_CNTL__UNITID__SHIFT 0x0
+#define IH_MMHUB_CNTL__IV_TLVL__SHIFT 0x8
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL__SHIFT 0xc
+#define IH_MMHUB_CNTL__UNITID_MASK 0x0000003FL
+#define IH_MMHUB_CNTL__IV_TLVL_MASK 0x00000700L
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL_MASK 0x00007000L
+//IH_INT_DROP_CNTL
+#define IH_INT_DROP_CNTL__INT_DROP_EN__SHIFT 0x0
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN__SHIFT 0x1
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN__SHIFT 0x2
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN__SHIFT 0x3
+#define IH_INT_DROP_CNTL__VF_MATCH_EN__SHIFT 0x4
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN__SHIFT 0x5
+#define IH_INT_DROP_CNTL__INT_DROP_MODE__SHIFT 0x6
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN__SHIFT 0x8
+#define IH_INT_DROP_CNTL__INT_DROPPED__SHIFT 0x10
+#define IH_INT_DROP_CNTL__INT_DROP_EN_MASK 0x00000001L
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN_MASK 0x00000002L
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN_MASK 0x00000004L
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN_MASK 0x00000008L
+#define IH_INT_DROP_CNTL__VF_MATCH_EN_MASK 0x00000010L
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN_MASK 0x00000020L
+#define IH_INT_DROP_CNTL__INT_DROP_MODE_MASK 0x000000C0L
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN_MASK 0x00000100L
+#define IH_INT_DROP_CNTL__INT_DROPPED_MASK 0x00010000L
+//IH_INT_DROP_MATCH_VALUE0
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE__SHIFT 0x8
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE__SHIFT 0x10
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE__SHIFT 0x17
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE__SHIFT 0x18
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE_MASK 0x000F0000L
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_VALUE1
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE_MASK 0xFFFFFFFFL
+//IH_INT_DROP_MATCH_MASK0
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK__SHIFT 0x8
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK__SHIFT 0x10
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK__SHIFT 0x17
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK__SHIFT 0x18
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK_MASK 0x000F0000L
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_MASK1
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK_MASK 0xFFFFFFFFL
+//IH_REGISTER_LAST_PART1
+#define IH_REGISTER_LAST_PART1__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART1__RESERVED_MASK 0xFFFFFFFFL
+//SEM_ACTIVE_FCN_ID
+#define SEM_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SEM_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SEM_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SEM_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SEM_VIRT_RESET_REQ
+#define SEM_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SEM_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SEM_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SEM_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SEM_RESP_SDMA0
+#define SEM_RESP_SDMA0__ADDR__SHIFT 0x2
+#define SEM_RESP_SDMA0__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_SDMA1
+#define SEM_RESP_SDMA1__ADDR__SHIFT 0x2
+#define SEM_RESP_SDMA1__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_UVD
+#define SEM_RESP_UVD__ADDR__SHIFT 0x2
+#define SEM_RESP_UVD__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_VCE_0
+#define SEM_RESP_VCE_0__ADDR__SHIFT 0x2
+#define SEM_RESP_VCE_0__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_ACP
+#define SEM_RESP_ACP__ADDR__SHIFT 0x2
+#define SEM_RESP_ACP__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_ISP
+#define SEM_RESP_ISP__ADDR__SHIFT 0x2
+#define SEM_RESP_ISP__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_VCE_1
+#define SEM_RESP_VCE_1__ADDR__SHIFT 0x2
+#define SEM_RESP_VCE_1__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_VP8
+#define SEM_RESP_VP8__ADDR__SHIFT 0x2
+#define SEM_RESP_VP8__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_GC
+#define SEM_RESP_GC__ADDR__SHIFT 0x2
+#define SEM_RESP_GC__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_UVD_1
+#define SEM_RESP_UVD_1__ADDR__SHIFT 0x2
+#define SEM_RESP_UVD_1__ADDR_MASK 0x000FFFFCL
+//SEM_CID_REMAP_INDEX
+#define SEM_CID_REMAP_INDEX__INDEX__SHIFT 0x0
+#define SEM_CID_REMAP_INDEX__INDEX_MASK 0x00000003L
+//SEM_CID_REMAP_DATA
+#define SEM_CID_REMAP_DATA__CLIENT_ID__SHIFT 0x0
+#define SEM_CID_REMAP_DATA__INITIATOR_ID__SHIFT 0x8
+#define SEM_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT 0x10
+#define SEM_CID_REMAP_DATA__CLIENT_ID_MASK 0x000000FFL
+#define SEM_CID_REMAP_DATA__INITIATOR_ID_MASK 0x0000FF00L
+#define SEM_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK 0x00FF0000L
+//SEM_ATOMIC_OP_LUT
+#define SEM_ATOMIC_OP_LUT__SIGNAL_NORMAL__SHIFT 0x0
+#define SEM_ATOMIC_OP_LUT__SIGNAL_WRITE1__SHIFT 0x7
+#define SEM_ATOMIC_OP_LUT__WAIT_NORMAL__SHIFT 0xe
+#define SEM_ATOMIC_OP_LUT__WAIT_CHECK0__SHIFT 0x15
+#define SEM_ATOMIC_OP_LUT__SIGNAL_NORMAL_MASK 0x0000007FL
+#define SEM_ATOMIC_OP_LUT__SIGNAL_WRITE1_MASK 0x00003F80L
+#define SEM_ATOMIC_OP_LUT__WAIT_NORMAL_MASK 0x001FC000L
+#define SEM_ATOMIC_OP_LUT__WAIT_CHECK0_MASK 0x0FE00000L
+//SEM_EDC_CONFIG
+#define SEM_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define SEM_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SEM_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define SEM_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//SEM_CHICKEN_BITS2
+#define SEM_CHICKEN_BITS2__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT 0x0
+#define SEM_CHICKEN_BITS2__MM_CLIENT_USE_CONFIG_VFID__SHIFT 0x1
+#define SEM_CHICKEN_BITS2__ACTIVE_FCN_ID_PROT_ENABLE_MASK 0x00000001L
+#define SEM_CHICKEN_BITS2__MM_CLIENT_USE_CONFIG_VFID_MASK 0x00000002L
+//SEM_MMHUB_CNTL
+#define SEM_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SEM_MMHUB_CNTL__TLVL_VALUE__SHIFT 0x8
+#define SEM_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+#define SEM_MMHUB_CNTL__TLVL_VALUE_MASK 0x00000700L
+//SEM_REGISTER_LAST_PART1
+#define SEM_REGISTER_LAST_PART1__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART1__RESERVED_MASK 0xFFFFFFFFL
+
+#endif
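
Every field in these generated headers comes as a __SHIFT/_MASK pair. A minimal sketch of how such a pair is typically consumed by driver code (the helper names are illustrative and not part of the header; kernel integer types are assumed):

/* Illustrative only: read and update the 3-bit IH_BUFFER_MEM_IDLE_HYSTERESIS
 * field of IH_MEM_POWER_CTRL using the shift/mask pair defined above. */
static inline uint32_t ih_mem_power_get_hysteresis(uint32_t reg)
{
	return (reg & IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK) >>
	       IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS__SHIFT;
}

static inline uint32_t ih_mem_power_set_hysteresis(uint32_t reg, uint32_t val)
{
	reg &= ~IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK;
	reg |= (val << IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS__SHIFT) &
	       IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK;
	return reg;
}
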
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index f775aac6c1bd..a41875ac5dfb 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -103,6 +103,7 @@ enum pp_clock_type {
enum amd_pp_sensors {
AMDGPU_PP_SENSOR_GFX_SCLK = 0,
+ AMDGPU_PP_SENSOR_CPU_CLK,
AMDGPU_PP_SENSOR_VDDNB,
AMDGPU_PP_SENSOR_VDDGFX,
AMDGPU_PP_SENSOR_UVD_VCLK,
@@ -155,9 +156,11 @@ enum {
enum PP_OD_DPM_TABLE_COMMAND {
PP_OD_EDIT_SCLK_VDDC_TABLE,
PP_OD_EDIT_MCLK_VDDC_TABLE,
+ PP_OD_EDIT_CCLK_VDDC_TABLE,
PP_OD_EDIT_VDDC_CURVE,
PP_OD_RESTORE_DEFAULT_TABLE,
- PP_OD_COMMIT_DPM_TABLE
+ PP_OD_COMMIT_DPM_TABLE,
+ PP_OD_EDIT_VDDGFX_OFFSET
};
struct pp_states_info {
diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
index 07633e22e99a..7dff85c81e5a 100644
--- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
@@ -33,7 +33,7 @@ struct IP_BASE_INSTANCE
struct IP_BASE
{
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
+} __maybe_unused;
static const struct IP_BASE ACP_BASE ={ { { { 0x02403800, 0x00480000, 0, 0, 0 } },
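
Annotating the type with __maybe_unused tells the compiler that variables of this type may legitimately go unreferenced, which silences unused-variable warnings for the many static const IP_BASE tables this header defines in every file that includes it. A small sketch of the mechanism, with made-up names:

/* With the attribute on the type, a translation unit that includes the
 * header but never reads EXAMPLE_BASE no longer warns about an unused
 * const variable. */
struct example_base {
	unsigned int segment[5];
} __maybe_unused;

static const struct example_base EXAMPLE_BASE = { { 0x02403800, 0x00480000, 0, 0, 0 } };
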
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 7b6ef05a1d35..e9b569b76716 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -36,6 +36,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
+#include <asm/processor.h>
#include "hwmgr.h"
static const struct cg_flag_name clocks[] = {
@@ -730,11 +731,18 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
*
* - minimum and maximum engine clock labeled OD_SCLK
*
- * - maximum memory clock labeled OD_MCLK
+ * - minimum (not available for Vega20 and Navi1x) and maximum memory
+ * clock labeled OD_MCLK
*
* - three <frequency, voltage> points labeled OD_VDDC_CURVE.
* They can be used to calibrate the sclk voltage curve.
*
+ * - voltage offset (in mV) applied to the target voltage calculation.
+ * This is available for Sienna Cichlid, Navy Flounder and Dimgrey
+ * Cavefish. For these ASICs, the target voltage calculation can be
+ * illustrated by "voltage = voltage calculated from v/f curve +
+ * overdrive vddgfx offset"
+ *
* - a list of valid ranges for sclk, mclk, and voltage curve points
* labeled OD_RANGE
*
@@ -755,6 +763,11 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* 600mV. "vc 2 1000 1000" will update point3 with clock set
* as 1000Mhz and voltage 1000mV.
*
+ * To update the voltage offset applied to the gfxclk/voltage calculation,
+ * enter the new value by writing a string that contains "vo offset".
+ * This is supported by Sienna Cichlid, Navy Flounder and Dimgrey Cavefish.
+ * The offset can be a positive or a negative value.
+ *
* - When you have edited all of the states as needed, write "c" (commit)
* to the file to commit your changes
*
@@ -787,6 +800,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
if (*buf == 's')
type = PP_OD_EDIT_SCLK_VDDC_TABLE;
+ else if (*buf == 'p')
+ type = PP_OD_EDIT_CCLK_VDDC_TABLE;
else if (*buf == 'm')
type = PP_OD_EDIT_MCLK_VDDC_TABLE;
else if(*buf == 'r')
@@ -795,6 +810,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
type = PP_OD_COMMIT_DPM_TABLE;
else if (!strncmp(buf, "vc", 2))
type = PP_OD_EDIT_VDDC_CURVE;
+ else if (!strncmp(buf, "vo", 2))
+ type = PP_OD_EDIT_VDDGFX_OFFSET;
else
return -EINVAL;
@@ -802,7 +819,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str = buf_cpy;
- if (type == PP_OD_EDIT_VDDC_CURVE)
+ if ((type == PP_OD_EDIT_VDDC_CURVE) ||
+ (type == PP_OD_EDIT_VDDGFX_OFFSET))
tmp_str++;
while (isspace(*++tmp_str));
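
Together with the documentation above, a minimal user-space sketch of driving the new "vo" command follows (the card path and the offset value are illustrative; each command is written separately and then committed with "c"):

#include <stdio.h>

/* Stage a -25 mV vddgfx offset and commit it through pp_od_clk_voltage. */
int main(void)
{
	const char *path = "/sys/class/drm/card0/device/pp_od_clk_voltage";
	FILE *f;

	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "vo -25\n");
	fclose(f);

	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "c\n");
	fclose(f);

	return 0;
}
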
@@ -898,7 +916,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
+ size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDGFX_OFFSET, buf+size);
size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
+ size += smu_print_clk_levels(&adev->smu, SMU_OD_CCLK, buf+size);
} else if (adev->powerplay.pp_funcs->print_clock_levels) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
@@ -1346,6 +1366,138 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
return count;
}
+static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ if (is_support_sw_smu(adev))
+ size = smu_print_clk_levels(&adev->smu, SMU_VCLK, buf);
+ else
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
+}
+
+static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int ret;
+ uint32_t mask = 0;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, SMU_VCLK, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ if (ret)
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ if (is_support_sw_smu(adev))
+ size = smu_print_clk_levels(&adev->smu, SMU_DCLK, buf);
+ else
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
+}
+
+static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int ret;
+ uint32_t mask = 0;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, SMU_DCLK, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ if (ret)
+ return -EINVAL;
+
+ return count;
+}
+
static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
struct device_attribute *attr,
char *buf)
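
The new pp_dpm_vclk and pp_dpm_dclk attributes follow the same convention as the other pp_dpm_* files: reading lists the available DPM levels, and writing a space-separated list of level indices restricts the levels the driver may use. A minimal user-space sketch (the card path and indices are illustrative):

#include <stdio.h>

/* Restrict VCLK DPM to levels 0 and 1 through the new sysfs attribute. */
int main(void)
{
	FILE *f = fopen("/sys/class/drm/card0/device/pp_dpm_vclk", "w");

	if (!f)
		return 1;
	fprintf(f, "0 1\n");
	fclose(f);
	return 0;
}
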
@@ -2025,6 +2177,8 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
@@ -2067,7 +2221,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
*states = ATTR_STATE_UNSUPPORTED;
if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
- (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+ (is_support_sw_smu(adev) && adev->smu.is_apu) ||
+ (!is_support_sw_smu(adev) && hwmgr->od_enabled))
*states = ATTR_STATE_SUPPORTED;
} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
@@ -2087,6 +2242,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(gpu_metrics)) {
if (asic_type < CHIP_VEGA12)
*states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
+ if (!(asic_type == CHIP_VANGOGH))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
+ if (!(asic_type == CHIP_VANGOGH))
+ *states = ATTR_STATE_UNSUPPORTED;
}
if (asic_type == CHIP_ARCTURUS) {
@@ -3465,6 +3626,27 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
*/
#if defined(CONFIG_DEBUG_FS)
+static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
+ struct amdgpu_device *adev) {
+ uint16_t *p_val;
+ uint32_t size;
+ int i;
+
+ if (is_support_cclk_dpm(adev)) {
+ p_val = kcalloc(adev->smu.cpu_core_num, sizeof(uint16_t),
+ GFP_KERNEL);
+
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
+ (void *)p_val, &size)) {
+ for (i = 0; i < adev->smu.cpu_core_num; i++)
+ seq_printf(m, "\t%u MHz (CPU%d)\n",
+ *(p_val + i), i);
+ }
+
+ kfree(p_val);
+ }
+}
+
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
uint32_t value;
@@ -3475,6 +3657,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* GPU Clocks */
size = sizeof(value);
seq_printf(m, "GFX Clocks and Power:\n");
+
+ amdgpu_debugfs_prints_cpu_info(m, adev);
+
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 4bdbcce7092d..a087e00382e6 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -465,124 +465,658 @@ struct smu_context
uint32_t gfx_default_soft_max_freq;
uint32_t gfx_actual_hard_min_freq;
uint32_t gfx_actual_soft_max_freq;
+
+ /* APU only */
+ uint32_t cpu_default_soft_min_freq;
+ uint32_t cpu_default_soft_max_freq;
+ uint32_t cpu_actual_soft_min_freq;
+ uint32_t cpu_actual_soft_max_freq;
+ uint32_t cpu_core_id_select;
+ uint16_t cpu_core_num;
};
struct i2c_adapter;
+/**
+ * struct pptable_funcs - Callbacks used to interact with the SMU.
+ */
struct pptable_funcs {
+ /**
+ * @run_btc: Calibrate voltage/frequency curve to fit the system's
+ * power delivery and voltage margins. Required for adaptive
+ * voltage frequency scaling (AVFS).
+ */
int (*run_btc)(struct smu_context *smu);
+
+ /**
+ * @get_allowed_feature_mask: Get allowed feature mask.
+ * &feature_mask: Array to store feature mask.
+ * &num: Elements in &feature_mask.
+ */
int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+
+ /**
+ * @get_current_power_state: Get the current power state.
+ *
+ * Return: Current power state on success, negative errno on failure.
+ */
enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
+
+ /**
+ * @set_default_dpm_table: Retrieve the default overdrive settings from
+ * the SMU.
+ */
int (*set_default_dpm_table)(struct smu_context *smu);
+
int (*set_power_state)(struct smu_context *smu);
+
+ /**
+ * @populate_umd_state_clk: Populate the UMD power state table with
+ * defaults.
+ */
int (*populate_umd_state_clk)(struct smu_context *smu);
+
+ /**
+ * @print_clk_levels: Print DPM clock levels for a clock domain
+ * to buffer. Star current level.
+ *
+ * Used for sysfs interfaces.
+ */
int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
+
+ /**
+ * @force_clk_levels: Set a range of allowed DPM levels for a clock
+ * domain.
+ * &clk_type: Clock domain.
+ * &mask: Range of allowed DPM levels.
+ */
int (*force_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask);
+
+ /**
+ * @od_edit_dpm_table: Edit the custom overdrive DPM table.
+ * &type: Type of edit.
+ * &input: Edit parameters.
+ * &size: Size of &input.
+ */
int (*od_edit_dpm_table)(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size);
+
+ /**
+ * @get_clock_by_type_with_latency: Get the speed and latency of a clock
+ * domain.
+ */
int (*get_clock_by_type_with_latency)(struct smu_context *smu,
enum smu_clk_type clk_type,
struct
pp_clock_levels_with_latency
*clocks);
+ /**
+ * @get_clock_by_type_with_voltage: Get the speed and voltage of a clock
+ * domain.
+ */
+ int (*get_clock_by_type_with_voltage)(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct
+ pp_clock_levels_with_voltage
+ *clocks);
+
+ /**
+ * @get_power_profile_mode: Print all power profile modes to
+ * buffer. The current mode is marked with an asterisk.
+ */
int (*get_power_profile_mode)(struct smu_context *smu, char *buf);
+
+ /**
+ * @set_power_profile_mode: Set a power profile mode. Also used to
+ * create/set custom power profile modes.
+ * &input: Power profile mode parameters.
+ * &size: Size of &input.
+ */
int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
+
+ /**
+ * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power
+ * management.
+ */
int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable);
+
+ /**
+ * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power
+ * management.
+ */
int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable);
+
+ /**
+ * @read_sensor: Read data from a sensor.
+ * &sensor: Sensor to read data from.
+ * &data: Sensor reading.
+ * &size: Size of &data.
+ */
int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
void *data, uint32_t *size);
+
+ /**
+ * @pre_display_config_changed: Prepare GPU for a display configuration
+ * change.
+ *
+ * Disable display tracking and pin memory clock speed to maximum. Used
+ * in display component synchronization.
+ */
int (*pre_display_config_changed)(struct smu_context *smu);
+
+ /**
+ * @display_config_changed: Notify the SMU of the current display
+ * configuration.
+ *
+ * Allows SMU to properly track blanking periods for memory clock
+ * adjustment. Used in display component synchronization.
+ */
int (*display_config_changed)(struct smu_context *smu);
+
int (*apply_clocks_adjust_rules)(struct smu_context *smu);
+
+ /**
+ * @notify_smc_display_config: Applies display requirements to the
+ * current power state.
+ *
+ * Optimize deep sleep DCEFclk and mclk for the current display
+ * configuration. Used in display component synchronization.
+ */
int (*notify_smc_display_config)(struct smu_context *smu);
+
+ /**
+ * @is_dpm_running: Check if DPM is running.
+ *
+ * Return: True if DPM is running, false otherwise.
+ */
bool (*is_dpm_running)(struct smu_context *smu);
+
+ /**
+ * @get_fan_speed_rpm: Get the current fan speed in RPM.
+ */
int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
+
+ /**
+ * @set_watermarks_table: Configure and upload the watermarks tables to
+ * the SMU.
+ */
int (*set_watermarks_table)(struct smu_context *smu,
struct pp_smu_wm_range_sets *clock_ranges);
+
+ /**
+ * @get_thermal_temperature_range: Get safe thermal limits in Celsius.
+ */
int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
+
+ /**
+ * @get_uclk_dpm_states: Get memory clock DPM levels in kHz.
+ * &clocks_in_khz: Array of DPM levels.
+ * &num_states: Elements in &clocks_in_khz.
+ */
int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
+
+ /**
+ * @set_default_od_settings: Set the overdrive tables to defaults.
+ */
int (*set_default_od_settings)(struct smu_context *smu);
+
+ /**
+ * @set_performance_level: Set a performance level.
+ */
int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
+
+ /**
+ * @display_disable_memory_clock_switch: Enable/disable dynamic memory
+ * clock switching.
+ *
+ * Disabling this feature forces memory clock speed to maximum.
+ * Enabling sets the minimum memory clock capable of driving the
+ * current display configuration.
+ */
int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
+
+ /**
+ * @dump_pptable: Print the power play table to the system log.
+ */
void (*dump_pptable)(struct smu_context *smu);
+
+ /**
+ * @get_power_limit: Get the device's power limits.
+ */
int (*get_power_limit)(struct smu_context *smu);
+
+ /**
+ * @set_df_cstate: Set data fabric cstate.
+ */
int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
+
+ /**
+ * @allow_xgmi_power_down: Enable/disable external global memory
+ * interconnect power down.
+ */
int (*allow_xgmi_power_down)(struct smu_context *smu, bool en);
+
+ /**
+ * @update_pcie_parameters: Update and upload the system's PCIe
+ * capabilities to the SMU.
+ * &pcie_gen_cap: Maximum allowed PCIe generation.
+ * &pcie_width_cap: Maximum allowed PCIe width.
+ */
int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
+
+ /**
+ * @i2c_init: Initialize i2c.
+ *
+ * The i2c bus is used internally by the SMU voltage regulators and
+ * other devices. An EEPROM on the i2c bus also stores bad page tables
+ * on boards with ECC.
+ */
int (*i2c_init)(struct smu_context *smu, struct i2c_adapter *control);
+
+ /**
+ * @i2c_fini: Tear down i2c.
+ */
void (*i2c_fini)(struct smu_context *smu, struct i2c_adapter *control);
+
+ /**
+ * @get_unique_id: Get the GPU's unique id. Used for asset tracking.
+ */
void (*get_unique_id)(struct smu_context *smu);
+
+ /**
+ * @get_dpm_clock_table: Get a copy of the DPM clock table.
+ *
+ * Used by display component in bandwidth and watermark calculations.
+ */
int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
+
+ /**
+ * @init_microcode: Request the SMU's firmware from the kernel.
+ */
int (*init_microcode)(struct smu_context *smu);
+
+ /**
+ * @load_microcode: Load firmware onto the SMU.
+ */
int (*load_microcode)(struct smu_context *smu);
+
+ /**
+ * @fini_microcode: Release the SMU's firmware.
+ */
void (*fini_microcode)(struct smu_context *smu);
+
+ /**
+ * @init_smc_tables: Initialize the SMU tables.
+ */
int (*init_smc_tables)(struct smu_context *smu);
+
+ /**
+ * @fini_smc_tables: Release the SMU tables.
+ */
int (*fini_smc_tables)(struct smu_context *smu);
+
+ /**
+ * @init_power: Initialize the power gate table context.
+ */
int (*init_power)(struct smu_context *smu);
+
+ /**
+ * @fini_power: Release the power gate table context.
+ */
int (*fini_power)(struct smu_context *smu);
+
+ /**
+ * @check_fw_status: Check the SMU's firmware status.
+ *
+ * Return: Zero if check passes, negative errno on failure.
+ */
int (*check_fw_status)(struct smu_context *smu);
+
+ /**
+ * @setup_pptable: Initialize the power play table and populate it with
+ * default values.
+ */
int (*setup_pptable)(struct smu_context *smu);
+
+ /**
+ * @get_vbios_bootup_values: Get default boot values from the VBIOS.
+ */
int (*get_vbios_bootup_values)(struct smu_context *smu);
+
+ /**
+ * @check_fw_version: Print driver and SMU interface versions to the
+ * system log.
+ *
+ * Interface mismatch is not a critical failure.
+ */
int (*check_fw_version)(struct smu_context *smu);
+
+ /**
+ * @powergate_sdma: Power up/down system direct memory access.
+ */
int (*powergate_sdma)(struct smu_context *smu, bool gate);
+
+ /**
+ * @set_gfx_cgpg: Enable/disable graphics engine coarse grain power
+ * gating.
+ */
int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
+
+ /**
+ * @write_pptable: Write the power play table to the SMU.
+ */
int (*write_pptable)(struct smu_context *smu);
+
+ /**
+ * @set_driver_table_location: Send the location of the driver table to
+ * the SMU.
+ */
int (*set_driver_table_location)(struct smu_context *smu);
+
+ /**
+ * @set_tool_table_location: Send the location of the tool table to the
+ * SMU.
+ */
int (*set_tool_table_location)(struct smu_context *smu);
+
+ /**
+ * @notify_memory_pool_location: Send the location of the memory pool to
+ * the SMU.
+ */
int (*notify_memory_pool_location)(struct smu_context *smu);
+
+ /**
+ * @system_features_control: Enable/disable all SMU features.
+ */
int (*system_features_control)(struct smu_context *smu, bool en);
+
+ /**
+ * @send_smc_msg_with_param: Send a message with a parameter to the SMU.
+ * &msg: Type of message.
+ * &param: Message parameter.
+ * &read_arg: SMU response (optional).
+ */
int (*send_smc_msg_with_param)(struct smu_context *smu,
enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
+
+ /**
+ * @send_smc_msg: Send a message to the SMU.
+ * &msg: Type of message.
+ * &read_arg: SMU response (optional).
+ */
int (*send_smc_msg)(struct smu_context *smu,
enum smu_message_type msg,
uint32_t *read_arg);
+
+ /**
+ * @init_display_count: Notify the SMU of the number of display
+ * components in the current display configuration.
+ */
int (*init_display_count)(struct smu_context *smu, uint32_t count);
+
+ /**
+ * @set_allowed_mask: Notify the SMU of the features currently allowed
+ * by the driver.
+ */
int (*set_allowed_mask)(struct smu_context *smu);
+
+ /**
+ * @get_enabled_mask: Get a mask of features that are currently enabled
+ * on the SMU.
+ * &feature_mask: Array representing enabled feature mask.
+ * &num: Elements in &feature_mask.
+ */
int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+
+ /**
+ * @feature_is_enabled: Test if a feature is enabled.
+ *
+ * Return: One if enabled, zero if disabled.
+ */
int (*feature_is_enabled)(struct smu_context *smu, enum smu_feature_mask mask);
+
+ /**
+ * @disable_all_features_with_exception: Disable all features with
+ * exception to those in &mask.
+ */
int (*disable_all_features_with_exception)(struct smu_context *smu, enum smu_feature_mask mask);
+
+ /**
+ * @notify_display_change: Enable fast memory clock switching.
+ *
+ * Allows for fine grained memory clock switching but has more stringent
+ * timing requirements.
+ */
int (*notify_display_change)(struct smu_context *smu);
+
+ /**
+ * @set_power_limit: Set power limit in watts.
+ */
int (*set_power_limit)(struct smu_context *smu, uint32_t n);
+
+ /**
+ * @init_max_sustainable_clocks: Populate max sustainable clock speed
+ * table with values from the SMU.
+ */
int (*init_max_sustainable_clocks)(struct smu_context *smu);
+
+ /**
+ * @enable_thermal_alert: Enable thermal alert interrupts.
+ */
int (*enable_thermal_alert)(struct smu_context *smu);
+
+ /**
+ * @disable_thermal_alert: Disable thermal alert interrupts.
+ */
int (*disable_thermal_alert)(struct smu_context *smu);
+
+ /**
+ * @set_min_dcef_deep_sleep: Set a minimum display fabric deep sleep
+ * clock speed in MHz.
+ */
int (*set_min_dcef_deep_sleep)(struct smu_context *smu, uint32_t clk);
+
+ /**
+ * @display_clock_voltage_request: Set a hard minimum frequency
+ * for a clock domain.
+ */
int (*display_clock_voltage_request)(struct smu_context *smu, struct
pp_display_clock_request
*clock_req);
+
+ /**
+ * @get_fan_control_mode: Get the current fan control mode.
+ */
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
+
+ /**
+ * @set_fan_control_mode: Set the fan control mode.
+ */
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
+
+ /**
+ * @set_fan_speed_rpm: Set a static fan speed in RPM.
+ */
int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
+
+ /**
+ * @set_xgmi_pstate: Set inter-chip global memory interconnect pstate.
+ * &pstate: Pstate to set. D0 if nonzero, D3 otherwise.
+ */
int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
+
+ /**
+ * @gfx_off_control: Enable/disable graphics engine poweroff.
+ */
int (*gfx_off_control)(struct smu_context *smu, bool enable);
+
+
+ /**
+ * @get_gfx_off_status: Get graphics engine poweroff status.
+ *
+ * Return:
+ * 0 - GFXOFF(default).
+ * 1 - Transition out of GFX State.
+ * 2 - Not in GFXOFF.
+ * 3 - Transition into GFXOFF.
+ */
uint32_t (*get_gfx_off_status)(struct smu_context *smu);
+
+ /**
+ * @register_irq_handler: Register interrupt request handlers.
+ */
int (*register_irq_handler)(struct smu_context *smu);
+
+ /**
+ * @set_azalia_d3_pme: Wake the audio decode engine from d3 sleep.
+ */
int (*set_azalia_d3_pme)(struct smu_context *smu);
+
+ /**
+ * @get_max_sustainable_clocks_by_dc: Get a copy of the max sustainable
+ * clock speeds table.
+ *
+ * Provides a way for the display component (DC) to get the max
+ * sustainable clocks from the SMU.
+ */
int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);
+
+ /**
+ * @baco_is_support: Check if GPU supports BACO (Bus Active, Chip Off).
+ */
bool (*baco_is_support)(struct smu_context *smu);
+
+ /**
+ * @baco_get_state: Get the current BACO state.
+ *
+ * Return: Current BACO state.
+ */
enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
+
+ /**
+ * @baco_set_state: Enter/exit BACO.
+ */
int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
+
+ /**
+ * @baco_enter: Enter BACO.
+ */
int (*baco_enter)(struct smu_context *smu);
+
+ /**
+ * @baco_exit: Exit BACO.
+ */
int (*baco_exit)(struct smu_context *smu);
+
+ /**
+ * @mode1_reset_is_support: Check if GPU supports mode1 reset.
+ */
bool (*mode1_reset_is_support)(struct smu_context *smu);
+
+ /**
+ * @mode1_reset: Perform mode1 reset.
+ *
+ * Complete GPU reset.
+ */
int (*mode1_reset)(struct smu_context *smu);
+
+ /**
+ * @mode2_reset: Perform mode2 reset.
+ *
+ * Mode2 reset generally does not reset as many IPs as mode1 reset. The
+ * set of IPs reset varies by ASIC.
+ */
int (*mode2_reset)(struct smu_context *smu);
+
+ /**
+ * @get_dpm_ultimate_freq: Get the hard frequency range of a clock
+ * domain in MHz.
+ */
int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
+
+ /**
+ * @set_soft_freq_limited_range: Set the soft frequency range of a clock
+ * domain in MHz.
+ */
int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
+
+ /**
+ * @set_power_source: Notify the SMU of the current power source.
+ */
int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
+
+ /**
+ * @log_thermal_throttling_event: Print a thermal throttling warning to
+ * the system's log.
+ */
void (*log_thermal_throttling_event)(struct smu_context *smu);
+
+ /**
+ * @get_pp_feature_mask: Print a human readable table of enabled
+ * features to buffer.
+ */
size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);
+
+ /**
+ * @set_pp_feature_mask: Request the SMU enable/disable features to
+ * match those enabled in &new_mask.
+ */
int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);
+
+ /**
+ * @get_gpu_metrics: Get a copy of the GPU metrics table from the SMU.
+ *
+ * Return: Size of &table
+ */
ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
+
+ /**
+ * @enable_mgpu_fan_boost: Enable multi-GPU fan boost.
+ */
int (*enable_mgpu_fan_boost)(struct smu_context *smu);
+
+ /**
+ * @gfx_ulv_control: Enable/disable ultra low voltage.
+ */
int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);
+
+ /**
+ * @deep_sleep_control: Enable/disable deep sleep.
+ */
int (*deep_sleep_control)(struct smu_context *smu, bool enablement);
+
+ /**
+ * @get_fan_parameters: Get fan parameters.
+ *
+ * Get maximum fan speed from the power play table.
+ */
int (*get_fan_parameters)(struct smu_context *smu);
+
+ /**
+ * @post_init: Helper function for ASIC-specific workarounds.
+ */
int (*post_init)(struct smu_context *smu);
+
+ /**
+ * @interrupt_work: Work task scheduled from SMU interrupt handler.
+ */
void (*interrupt_work)(struct smu_context *smu);
+
+ /**
+ * @gpo_control: Enable/disable graphics power optimization if supported.
+ */
int (*gpo_control)(struct smu_context *smu, bool enablement);
+
+ /**
+ * @gfx_state_change_set: Send the current graphics state to the SMU.
+ */
int (*gfx_state_change_set)(struct smu_context *smu, uint32_t state);
+
+ /**
+ * @set_fine_grain_gfx_freq_parameters: Set fine grain graphics clock
+ * parameters to defaults.
+ */
int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu);
};
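
Since pptable_funcs is a table of per-ASIC callbacks, common SMU code dispatches through it rather than calling ASIC code directly. A minimal sketch of such a dispatch wrapper, assuming the context's ppt_funcs member points at the bound table (the wrapper itself is illustrative, not part of this patch):

/* Illustrative dispatch helper: print the SCLK DPM levels of whichever
 * ASIC backend is bound to this smu_context. */
static int example_print_sclk_levels(struct smu_context *smu, char *buf)
{
	if (smu->ppt_funcs && smu->ppt_funcs->print_clk_levels)
		return smu->ppt_funcs->print_clk_levels(smu, SMU_SCLK, buf);

	return -EOPNOTSUPP;
}
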
@@ -596,6 +1130,7 @@ typedef enum {
METRICS_CURR_DCLK1,
METRICS_CURR_FCLK,
METRICS_CURR_DCEFCLK,
+ METRICS_AVERAGE_CPUCLK,
METRICS_AVERAGE_GFXCLK,
METRICS_AVERAGE_SOCCLK,
METRICS_AVERAGE_FCLK,
@@ -636,6 +1171,12 @@ enum smu_cmn2asic_mapping_type {
#define FEA_MAP(fea) \
[SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}
+#define FEA_MAP_REVERSE(fea) \
+ [SMU_FEATURE_DPM_##fea##_BIT] = {1, FEATURE_##fea##_DPM_BIT}
+
+#define FEA_MAP_HALF_REVERSE(fea) \
+ [SMU_FEATURE_DPM_##fea##CLK_BIT] = {1, FEATURE_##fea##_DPM_BIT}
+
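+/* For reference, the new helpers cover feature names where the driver-side
+ * SMU_FEATURE_DPM_* spelling and the firmware-side FEATURE_*_DPM spelling
+ * place the DPM token differently.  With an illustrative feature name:
+ *
+ *   FEA_MAP_REVERSE(SOCCLK) expands to
+ *     [SMU_FEATURE_DPM_SOCCLK_BIT] = {1, FEATURE_SOCCLK_DPM_BIT}
+ *
+ *   FEA_MAP_HALF_REVERSE(GFX) expands to
+ *     [SMU_FEATURE_DPM_GFXCLK_BIT] = {1, FEATURE_GFX_DPM_BIT}
+ */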
#define TAB_MAP(tab) \
[SMU_TABLE_##tab] = {1, TABLE_##tab}
@@ -718,6 +1259,7 @@ extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
bool is_support_sw_smu(struct amdgpu_device *adev);
+bool is_support_cclk_dpm(struct amdgpu_device *adev);
int smu_reset(struct smu_context *smu);
int smu_sys_get_pp_table(struct smu_context *smu, void **table);
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size);
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
index 1c19eae93ff1..6e23a3f803a7 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
@@ -141,7 +141,6 @@ typedef struct {
uint32_t MaxGfxClk;
uint8_t NumDfPstatesEnabled;
- uint8_t NumDpmLevelsEnabled;
uint8_t NumDcfclkLevelsEnabled;
uint8_t NumDispClkLevelsEnabled; //applies to both dispclk and dppclk
uint8_t NumSocClkLevelsEnabled;
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 720d15612fe1..b76270e8767c 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -211,6 +211,7 @@
__SMU_DUMMY_MAP(SetGpoFeaturePMask), \
__SMU_DUMMY_MAP(DisallowGpo), \
__SMU_DUMMY_MAP(Enable2ndUSB20Port), \
+ __SMU_DUMMY_MAP(RequestActiveWgp), \
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -236,10 +237,12 @@ enum smu_clk_type {
SMU_SCLK,
SMU_MCLK,
SMU_PCIE,
+ SMU_OD_CCLK,
SMU_OD_SCLK,
SMU_OD_MCLK,
SMU_OD_VDDC_CURVE,
SMU_OD_RANGE,
+ SMU_OD_VDDGFX_OFFSET,
SMU_CLK_COUNT,
};
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index 13de692a4213..102a0cf12d7a 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -31,7 +31,7 @@
#define SMU11_DRIVER_IF_VERSION_NV12 0x36
#define SMU11_DRIVER_IF_VERSION_NV14 0x36
#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3D
-#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xC
+#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x02
#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
index 6a7de8b898fa..f2cef0930aa9 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
@@ -33,6 +33,7 @@
#include "ppsmc.h"
#include "amd_acpi.h"
#include "pp_psm.h"
+#include "vega10_hwmgr.h"
extern const struct pp_smumgr_func ci_smu_funcs;
extern const struct pp_smumgr_func smu8_smu_funcs;
@@ -46,7 +47,6 @@ extern const struct pp_smumgr_func vega12_smu_funcs;
extern const struct pp_smumgr_func smu10_smu_funcs;
extern const struct pp_smumgr_func vega20_smu_funcs;
-extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index 83a6504e093c..b1038d30c8dc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -279,7 +279,7 @@ static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3(
*
* @hwmgr: input parameter: pointer to HwMgr
* @clock_value: input parameter: memory clock
- * @dividers: output parameter: memory PLL dividers
+ * @mpll_param: output parameter: memory clock parameters
* @strobe_mode: input parameter: 1 for strobe mode, 0 for performance mode
*/
int atomctrl_get_memory_pll_dividers_si(
@@ -332,7 +332,7 @@ int atomctrl_get_memory_pll_dividers_si(
*
* @hwmgr: input parameter: pointer to HwMgr
* @clock_value: input parameter: memory clock
- * @dividers: output parameter: memory PLL dividers
+ * @mpll_param: output parameter: memory clock parameters
*/
int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
index 741e03ad5311..f2a55c1413f5 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
@@ -1362,6 +1362,7 @@ static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i
* @hwmgr: Pointer to the hardware manager.
* @entry_index: The index of the entry to be extracted from the table.
* @power_state: The address of the PowerState instance being created.
+ * @call_back_func: The function to call into to fill power state
* Return: -1 if the entry cannot be retrieved.
*/
int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index e57e64bbacdc..88322781e447 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -251,7 +251,7 @@ static int smu10_set_hard_min_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cl
smu10_data->gfx_actual_soft_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- smu10_data->gfx_actual_soft_min_freq,
+ clock,
NULL);
}
return 0;
@@ -558,7 +558,8 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
/* enable the pp_od_clk_voltage sysfs file */
hwmgr->od_enabled = 1;
-
+ /* fine grain tuning function is disabled by default */
+ data->fine_grain_enabled = 0;
return result;
}
@@ -597,6 +598,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1;
+ uint32_t fine_grain_min_freq = 0, fine_grain_max_freq = 0;
if (hwmgr->smu_version < 0x1E3700) {
pr_info("smu firmware version too old, can not set dpm level\n");
@@ -613,6 +615,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_max_freq_limit/100,
@@ -648,6 +658,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
@@ -658,6 +676,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
min_mclk,
@@ -668,6 +694,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
SMU10_UMD_PSTATE_GFXCLK,
@@ -703,6 +737,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
@@ -741,6 +783,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_min_freq_limit/100,
@@ -759,6 +809,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
+ data->fine_grain_enabled = 1;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
@@ -948,6 +999,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu10_voltage_dependency_table *mclk_table =
data->clock_vol_info.vdd_dep_on_fclk;
uint32_t i, now, size = 0;
+ uint32_t min_freq, max_freq = 0;
+ uint32_t ret = 0;
switch (type) {
case PP_SCLK:
@@ -983,18 +1036,28 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s:\n", "OD_SCLK");
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (ret)
+ return ret;
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (ret)
+ return ret;
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
- (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : data->gfx_min_freq_limit/100);
- size += sprintf(buf + size, "1: %10uMhz\n", data->gfx_max_freq_limit/100);
+ (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
- uint32_t min_freq, max_freq = 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (ret)
+ return ret;
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (ret)
+ return ret;
size = sprintf(buf, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
@@ -1414,23 +1477,96 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
{
+ uint32_t min_freq, max_freq = 0;
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
if (!hwmgr->od_enabled) {
pr_err("Fine grain not support\n");
return -EINVAL;
}
- if (size != 2) {
- pr_err("Input parameter number not correct\n");
+ if (!smu10_data->fine_grain_enabled) {
+ pr_err("Fine grain not started\n");
return -EINVAL;
}
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
- if (input[0] == 0)
- smu10_set_hard_min_gfxclk_by_freq(hwmgr, input[1]);
- else if (input[0] == 1)
- smu10_set_soft_max_gfxclk_by_freq(hwmgr, input[1]);
- else
+ if (size != 2) {
+ pr_err("Input parameter number not correct\n");
return -EINVAL;
+ }
+
+ if (input[0] == 0) {
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (input[1] < min_freq) {
+ pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[1], min_freq);
+ return -EINVAL;
+ }
+ smu10_data->gfx_actual_soft_min_freq = input[1];
+ } else if (input[0] == 1) {
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (input[1] > max_freq) {
+ pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[1], max_freq);
+ return -EINVAL;
+ }
+ smu10_data->gfx_actual_soft_max_freq = input[1];
+ } else {
+ return -EINVAL;
+ }
+ } else if (type == PP_OD_RESTORE_DEFAULT_TABLE) {
+ if (size != 0) {
+ pr_err("Input parameter number not correct\n");
+ return -EINVAL;
+ }
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+
+ smu10_data->gfx_actual_soft_min_freq = min_freq;
+ smu10_data->gfx_actual_soft_max_freq = max_freq;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ min_freq,
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ max_freq,
+ NULL);
+ if (ret)
+ return ret;
+ } else if (type == PP_OD_COMMIT_DPM_TABLE) {
+ if (size != 0) {
+ pr_err("Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (smu10_data->gfx_actual_soft_min_freq > smu10_data->gfx_actual_soft_max_freq) {
+ pr_err("The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu10_data->gfx_actual_soft_min_freq, smu10_data->gfx_actual_soft_max_freq);
+ return -EINVAL;
+ }
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ smu10_data->gfx_actual_soft_min_freq,
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ smu10_data->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
}
return 0;
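
Taken together, the smu10 changes gate fine-grain sclk control behind the manual performance level (fine_grain_enabled), validate the requested min/max against the SMU-reported limits, and split the work across the edit/restore/commit commands. A hedged user-space sketch of the typical sequence; the card path, frequencies, and command strings follow the usual pp_od_clk_voltage conventions but are illustrative here:

#include <stdio.h>

/* Sketch of driving the fine-grain sclk interface on a Raven-class APU. */
static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	const char *lvl = "/sys/class/drm/card0/device/power_dpm_force_performance_level";
	const char *od  = "/sys/class/drm/card0/device/pp_od_clk_voltage";

	write_str(lvl, "manual");	/* manual level sets fine_grain_enabled */
	write_str(od, "s 0 600");	/* index 0: soft minimum sclk, MHz */
	write_str(od, "s 1 1200");	/* index 1: soft maximum sclk, MHz */
	write_str(od, "c");		/* commit: min must not exceed max */
	return 0;
}
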
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
index 6c9b5f060902..808e0ecbe1f0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
@@ -283,6 +283,7 @@ struct smu10_hwmgr {
uint32_t vclk_soft_min;
uint32_t dclk_soft_min;
uint32_t gfx_actual_soft_min_freq;
+ uint32_t gfx_actual_soft_max_freq;
uint32_t gfx_min_freq_limit;
uint32_t gfx_max_freq_limit; /* in 10Khz*/
@@ -299,6 +300,8 @@ struct smu10_hwmgr {
bool need_min_deep_sleep_dcefclk;
uint32_t deep_sleep_dcefclk;
uint32_t num_active_display;
+
+ bool fine_grain_enabled;
};
struct pp_hwmgr;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 82676c086ce4..c57dc9ae81f2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -235,7 +235,7 @@ static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
/**
* smu7_enable_smc_voltage_controller - Enable voltage control
*
- * @hwmgr the address of the powerplay hardware manager.
+ * @hwmgr: the address of the powerplay hardware manager.
* Return: always PP_Result_OK
*/
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
@@ -4501,7 +4501,7 @@ static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
* smu7_set_max_fan_rpm_output - Set maximum target operating fan output RPM
*
* @hwmgr: the address of the powerplay hardware manager.
- * @usMaxFanRpm: max operating fan RPM value.
+ * @us_max_fan_rpm: max operating fan RPM value.
* Return: The response that came from the SMC.
*/
static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 1b47f94e0331..29c99642d22d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -542,11 +542,11 @@ static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
/**
-* Get Leakage VDDC based on leakage ID.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0.
-*/
+ * Get Leakage VDDC based on leakage ID.
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * return: always 0.
+ */
static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -600,9 +600,9 @@ static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
/**
* Change virtual leakage voltage to actual value.
*
- * @param hwmgr the address of the powerplay hardware manager.
- * @param pointer to changing voltage
- * @param pointer to leakage table
+ * @hwmgr: the address of the powerplay hardware manager.
+ * @voltage: pointer to changing voltage
+ * @leakage_table: pointer to leakage table
*/
static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
@@ -624,13 +624,13 @@ static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
}
/**
-* Patch voltage lookup table by EVV leakages.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pointer to voltage lookup table
-* @param pointer to leakage table
-* @return always 0
-*/
+ * Patch voltage lookup table by EVV leakages.
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * @lookup_table: pointer to voltage lookup table
+ * @leakage_table: pointer to leakage table
+ * return: always 0
+ */
static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
phm_ppt_v1_voltage_lookup_table *lookup_table,
struct vega10_leakage_voltage *leakage_table)
@@ -1001,13 +1001,12 @@ static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
}
/**
-* Remove repeated voltage values and create table with unique values.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param vol_table the pointer to changing voltage table
-* @return 0 in success
-*/
-
+ * Remove repeated voltage values and create table with unique values.
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * @vol_table: the pointer to changing voltage table
+ * return: 0 in success
+ */
static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_voltage_table *vol_table)
{
@@ -1151,11 +1150,11 @@ static void vega10_trim_voltage_table_to_fit_state_table(
}
/**
-* Create Voltage Tables.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
+ * Create Voltage Tables.
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * return: always 0
+ */
static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -1212,11 +1211,11 @@ static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
}
/*
- * @fn vega10_init_dpm_state
- * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
+ * vega10_init_dpm_state
+ * Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
*
- * @param dpm_state - the address of the DPM Table to initiailize.
- * @return None.
+ * @dpm_state: the address of the DPM Table to initialize.
+ * return: None.
*/
static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
{
@@ -1460,11 +1459,11 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
}
/*
- * @fn vega10_populate_ulv_state
- * @brief Function to provide parameters for Utral Low Voltage state to SMC.
+ * vega10_populate_ulv_state
+ * Function to provide parameters for Ultra Low Voltage state to SMC.
*
- * @param hwmgr - the address of the hardware manager.
- * @return Always 0.
+ * @hwmgr: the address of the hardware manager.
+ * return: Always 0.
*/
static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
{
@@ -1545,13 +1544,13 @@ static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
}
/**
-* Populates single SMC GFXSCLK structure using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param gfx_clock the GFX clock to use to populate the structure.
-* @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
-*/
-
+ * Populates single SMC GFXSCLK structure using the provided engine clock
+ *
+ * @hwmgr: the address of the hardware manager
+ * @gfx_clock: the GFX clock to use to populate the structure.
+ * @current_gfxclk_level: location in PPTable for the SMC GFXCLK structure.
+ * @acg_freq: ACG frequency to return (MHz)
+ */
static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
uint32_t *acg_freq)
@@ -1610,12 +1609,13 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
}
/**
- * @brief Populates single SMC SOCCLK structure using the provided clock.
+ * Populates single SMC SOCCLK structure using the provided clock.
*
- * @param hwmgr - the address of the hardware manager.
- * @param soc_clock - the SOC clock to use to populate the structure.
- * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
- * @return 0 on success..
+ * @hwmgr: the address of the hardware manager.
+ * @soc_clock: the SOC clock to use to populate the structure.
+ * @current_soc_did: DFS divider to pass back to caller
+ * @current_vol_index: index of current VDD to pass back to caller
+ * return: 0 on success
*/
static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
uint32_t soc_clock, uint8_t *current_soc_did,
@@ -1659,10 +1659,10 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
}
/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
+ * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+ *
+ * @hwmgr: the address of the hardware manager
+ */
static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -1746,12 +1746,12 @@ static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
}
}
-/**
- * @brief Populates single SMC GFXCLK structure using the provided clock.
+/*
+ * Populates single SMC GFXCLK structure using the provided clock.
*
- * @param hwmgr - the address of the hardware manager.
- * @param mem_clock - the memory clock to use to populate the structure.
- * @return 0 on success..
+ * @hwmgr: the address of the hardware manager.
+ * @mem_clock: the memory clock to use to populate the structure.
+ * return: 0 on success.
*/
static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
uint32_t mem_clock, uint8_t *current_mem_vid,
@@ -1808,10 +1808,10 @@ static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
}
/**
- * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
+ * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
*
- * @param pHwMgr - the address of the hardware manager.
- * @return PP_Result_OK on success.
+ * @hwmgr: the address of the hardware manager.
+ * return: PP_Result_OK on success.
*/
static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
@@ -2486,12 +2486,11 @@ static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
}
/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data (PowerState)
-* @return always 0
-*/
+ * Initializes the SMC table and uploads it
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * return: always 0
+ */
static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
@@ -2864,11 +2863,11 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
/**
- * @brief Tell SMC to enabled the supported DPMs.
+ * Tell SMC to enable the supported DPMs.
*
- * @param hwmgr - the address of the powerplay hardware manager.
- * @Param bitmap - bitmap for the features to enabled.
- * @return 0 on at least one DPM is successfully enabled.
+ * @hwmgr: the address of the powerplay hardware manager.
+ * @bitmap: bitmap for the features to be enabled.
+ * return: 0 if at least one DPM is successfully enabled.
*/
static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
{
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h
index f752b4ad0c8a..07c06f8c90b0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h
@@ -442,5 +442,6 @@ int vega10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int vega10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int vega10_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
+int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
#endif /* _VEGA10_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index dc206fa88c5e..c0753029a8e2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -718,12 +718,11 @@ static int vega12_save_default_power_profile(struct pp_hwmgr *hwmgr)
#endif
/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data (PowerState)
-* @return always 0
-*/
+ * Initializes the SMC table and uploads it
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * return: always 0
+ */
static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index da84012b7fd5..87811b005b85 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -771,12 +771,11 @@ static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
}
/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data (PowerState)
-* @return always 0
-*/
+ * Initializes the SMC table and uploads it
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * return: always 0
+ */
static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 8b867a6d52b5..7fe61ad3ed10 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -288,6 +288,20 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
return false;
}
+bool is_support_cclk_dpm(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = &adev->smu;
+
+ if (!is_support_sw_smu(adev))
+ return false;
+
+ if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
+ return false;
+
+ return true;
+}
+
+
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -405,8 +419,6 @@ static int smu_set_funcs(struct amdgpu_device *adev)
break;
case CHIP_VANGOGH:
vangogh_set_ppt_funcs(smu);
- /* enable the OD by default to allow the fine grain tuning function */
- smu->od_enabled = true;
break;
default:
return -EINVAL;
@@ -478,9 +490,6 @@ static int smu_late_init(void *handle)
smu_set_fine_grain_gfx_freq_parameters(smu);
- if (adev->asic_type == CHIP_VANGOGH)
- return 0;
-
if (!smu->pm_enabled)
return 0;
@@ -490,6 +499,9 @@ static int smu_late_init(void *handle)
return ret;
}
+ if (adev->asic_type == CHIP_VANGOGH)
+ return 0;
+
ret = smu_set_default_od_settings(smu);
if (ret) {
dev_err(adev->dev, "Failed to setup default OD settings!\n");
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 51e83123f72a..7ebf9588983f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1673,7 +1673,7 @@ static int navi10_read_sensor(struct smu_context *smu,
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = navi10_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
+ ret = navi10_get_smu_metrics_data(smu, METRICS_AVERAGE_GFXCLK, (uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 9608745d732f..24f3c96a5e5e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -314,6 +314,12 @@ static int sienna_cichlid_check_powerplay_table(struct smu_context *smu)
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
+ /*
+ * Instead of having its own buffer space and getting a copy of overdrive_table,
+ * smu->od_settings just points to the actual overdrive_table
+ */
+ smu->od_settings = &powerplay_table->overdrive_table;
+
return 0;
}
@@ -907,6 +913,22 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu,
return dpm_desc->SnapToDiscrete == 0 ? true : false;
}
+static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
+ enum SMU_11_0_7_ODFEATURE_CAP cap)
+{
+ return od_table->cap[cap];
+}
+
+static void sienna_cichlid_get_od_setting_range(struct smu_11_0_7_overdrive_table *od_table,
+ enum SMU_11_0_7_ODSETTING_ID setting,
+ uint32_t *min, uint32_t *max)
+{
+ if (min)
+ *min = od_table->min[setting];
+ if (max)
+ *max = od_table->max[setting];
+}
+
static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
@@ -915,11 +937,16 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+ struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)table_context->overdrive_table;
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
uint32_t gen_speed, lane_width;
+ uint32_t min_value, max_value;
+ uint32_t smu_version;
switch (clk_type) {
case SMU_GFXCLK:
@@ -995,6 +1022,70 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
(lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
"*" : "");
break;
+ case SMU_OD_SCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+
+ if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS))
+ break;
+
+ size += sprintf(buf + size, "OD_SCLK:\n");
+ size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ break;
+
+ case SMU_OD_MCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+
+ if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_UCLK_LIMITS))
+ break;
+
+ size += sprintf(buf + size, "OD_MCLK:\n");
+ size += sprintf(buf + size, "0: %uMhz\n1: %uMHz\n", od_table->UclkFmin, od_table->UclkFmax);
+ break;
+
+ case SMU_OD_VDDGFX_OFFSET:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+
+ /*
+ * OD GFX Voltage Offset functionality is supported only by SMU
+ * firmware 58.41.0 and onwards.
+ */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ (smu_version < 0x003a2900))
+ break;
+
+ size += sprintf(buf + size, "OD_VDDGFX_OFFSET:\n");
+ size += sprintf(buf + size, "%dmV\n", od_table->VddGfxOffset);
+ break;
+
+ case SMU_OD_RANGE:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+
+ if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
+ sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN,
+ &min_value, NULL);
+ sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMAX,
+ NULL, &max_value);
+ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_UCLK_LIMITS)) {
+ sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_UCLKFMIN,
+ &min_value, NULL);
+ sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_UCLKFMAX,
+ NULL, &max_value);
+ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+ break;
+
default:
break;
}
@@ -1694,6 +1785,243 @@ static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu,
return ret;
}
+static void sienna_cichlid_dump_od_table(struct smu_context *smu,
+ OverDriveTable_t *od_table)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t smu_version;
+
+ dev_dbg(smu->adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin,
+ od_table->GfxclkFmax);
+ dev_dbg(smu->adev->dev, "OD: Uclk: (%d, %d)\n", od_table->UclkFmin,
+ od_table->UclkFmax);
+
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (!((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ (smu_version < 0x003a2900)))
+ dev_dbg(smu->adev->dev, "OD: VddGfxOffset: %d\n", od_table->VddGfxOffset);
+}
+
+static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
+{
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)smu->smu_table.overdrive_table;
+ OverDriveTable_t *boot_od_table =
+ (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
+ int ret = 0;
+
+ ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
+ 0, (void *)od_table, false);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
+ return ret;
+ }
+
+ memcpy(boot_od_table, od_table, sizeof(OverDriveTable_t));
+
+ sienna_cichlid_dump_od_table(smu, od_table);
+
+ return 0;
+}
+
+static int sienna_cichlid_od_setting_check_range(struct smu_context *smu,
+ struct smu_11_0_7_overdrive_table *od_table,
+ enum SMU_11_0_7_ODSETTING_ID setting,
+ uint32_t value)
+{
+ if (value < od_table->min[setting]) {
+ dev_warn(smu->adev->dev, "OD setting (%d, %d) is less than the minimum allowed (%d)\n",
+ setting, value, od_table->min[setting]);
+ return -EINVAL;
+ }
+ if (value > od_table->max[setting]) {
+ dev_warn(smu->adev->dev, "OD setting (%d, %d) is greater than the maximum allowed (%d)\n",
+ setting, value, od_table->max[setting]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)table_context->overdrive_table;
+ struct smu_11_0_7_overdrive_table *od_settings =
+ (struct smu_11_0_7_overdrive_table *)smu->od_settings;
+ struct amdgpu_device *adev = smu->adev;
+ enum SMU_11_0_7_ODSETTING_ID freq_setting;
+ uint16_t *freq_ptr;
+ int i, ret = 0;
+ uint32_t smu_version;
+
+ if (!smu->od_enabled) {
+ dev_warn(smu->adev->dev, "OverDrive is not enabled!\n");
+ return -EINVAL;
+ }
+
+ if (!smu->od_settings) {
+ dev_err(smu->adev->dev, "OD board limits are not set!\n");
+ return -ENOENT;
+ }
+
+ if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
+ dev_err(smu->adev->dev, "Overdrive table was not initialized!\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!sienna_cichlid_is_od_feature_supported(od_settings,
+ SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
+ dev_warn(smu->adev->dev, "GFXCLK_LIMITS not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ dev_info(smu->adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ switch (input[i]) {
+ case 0:
+ if (input[i + 1] > od_table->GfxclkFmax) {
+ dev_info(smu->adev->dev, "GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
+ input[i + 1], od_table->GfxclkFmax);
+ return -EINVAL;
+ }
+
+ freq_setting = SMU_11_0_7_ODSETTING_GFXCLKFMIN;
+ freq_ptr = &od_table->GfxclkFmin;
+ break;
+
+ case 1:
+ if (input[i + 1] < od_table->GfxclkFmin) {
+ dev_info(smu->adev->dev, "GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
+ input[i + 1], od_table->GfxclkFmin);
+ return -EINVAL;
+ }
+
+ freq_setting = SMU_11_0_7_ODSETTING_GFXCLKFMAX;
+ freq_ptr = &od_table->GfxclkFmax;
+ break;
+
+ default:
+ dev_info(smu->adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+ dev_info(smu->adev->dev, "Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+
+ ret = sienna_cichlid_od_setting_check_range(smu, od_settings,
+ freq_setting, input[i + 1]);
+ if (ret)
+ return ret;
+
+ *freq_ptr = (uint16_t)input[i + 1];
+ }
+ break;
+
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_UCLK_LIMITS)) {
+ dev_warn(smu->adev->dev, "UCLK_LIMITS not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ dev_info(smu->adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ switch (input[i]) {
+ case 0:
+ if (input[i + 1] > od_table->UclkFmax) {
+ dev_info(smu->adev->dev, "UclkFmin (%ld) must be <= UclkFmax (%u)!\n",
+ input[i + 1], od_table->UclkFmax);
+ return -EINVAL;
+ }
+
+ freq_setting = SMU_11_0_7_ODSETTING_UCLKFMIN;
+ freq_ptr = &od_table->UclkFmin;
+ break;
+
+ case 1:
+ if (input[i + 1] < od_table->UclkFmin) {
+ dev_info(smu->adev->dev, "UclkFmax (%ld) must be >= UclkFmin (%u)!\n",
+ input[i + 1], od_table->UclkFmin);
+ return -EINVAL;
+ }
+
+ freq_setting = SMU_11_0_7_ODSETTING_UCLKFMAX;
+ freq_ptr = &od_table->UclkFmax;
+ break;
+
+ default:
+ dev_info(smu->adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
+ dev_info(smu->adev->dev, "Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+
+ ret = sienna_cichlid_od_setting_check_range(smu, od_settings,
+ freq_setting, input[i + 1]);
+ if (ret)
+ return ret;
+
+ *freq_ptr = (uint16_t)input[i + 1];
+ }
+ break;
+
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ memcpy(table_context->overdrive_table,
+ table_context->boot_overdrive_table,
+ sizeof(OverDriveTable_t));
+ fallthrough;
+
+ case PP_OD_COMMIT_DPM_TABLE:
+ sienna_cichlid_dump_od_table(smu, od_table);
+
+ ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
+ 0, (void *)od_table, true);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to import overdrive table!\n");
+ return ret;
+ }
+ break;
+
+ case PP_OD_EDIT_VDDGFX_OFFSET:
+ if (size != 1) {
+ dev_info(smu->adev->dev, "invalid number of parameters: %d\n", size);
+ return -EINVAL;
+ }
+
+ /*
+ * OD GFX Voltage Offset functionality is supported only by SMU
+ * firmware 58.41.0 and onwards.
+ */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ (smu_version < 0x003a2900)) {
+ dev_err(smu->adev->dev, "OD GFX Voltage offset functionality is supported "
+ "only by 58.41.0 and onwards SMU firmwares!\n");
+ return -EOPNOTSUPP;
+ }
+
+ od_table->VddGfxOffset = (int16_t)input[0];
+
+ sienna_cichlid_dump_od_table(smu, od_table);
+ break;
+
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
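
The new od_edit_dpm_table handler accepts index/value pairs for the GFXCLK and UCLK limits, a single value for the GFX voltage offset, and the usual restore/commit commands; each value is checked against the board's overdrive limits before the table is uploaded. A hedged user-space sketch of how this is usually exercised through pp_od_clk_voltage (the "vo" token, path, and values are illustrative, not guaranteed):

#include <stdio.h>

static void od_write(const char *cmd)
{
	FILE *f = fopen("/sys/class/drm/card0/device/pp_od_clk_voltage", "w");

	if (!f)
		return;
	fputs(cmd, f);
	fclose(f);
}

int main(void)
{
	od_write("s 0 800");	/* GfxclkFmin, MHz; must stay within the OD range */
	od_write("s 1 2400");	/* GfxclkFmax, MHz */
	od_write("m 1 1000");	/* UclkFmax, MHz */
	od_write("vo 25");	/* GFX voltage offset in mV (needs SMU fw >= 58.41.0) */
	od_write("c");		/* commit: uploads the edited OverDriveTable_t */
	return 0;
}
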
static int sienna_cichlid_run_btc(struct smu_context *smu)
{
return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
@@ -2372,7 +2700,7 @@ static void sienna_cichlid_fill_i2c_req(SwI2cRequest_t *req, bool write,
{
int i;
- req->I2CcontrollerPort = 0;
+ req->I2CcontrollerPort = 1;
req->I2CSpeed = 2;
req->SlaveAddress = address;
req->NumCmds = numbytes;
@@ -2817,6 +3145,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.mode1_reset = smu_v11_0_mode1_reset,
.get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .set_default_od_settings = sienna_cichlid_set_default_od_settings,
+ .od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table,
.run_btc = sienna_cichlid_run_btc,
.set_power_source = smu_v11_0_set_power_source,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index b279dbbbce6b..147efe12973c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1119,6 +1119,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_VANGOGH:
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 8cb4fcee9a2c..dc41abe7b1d3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -31,6 +31,10 @@
#include "smu_v11_5_ppsmc.h"
#include "smu_v11_5_pmfw.h"
#include "smu_cmn.h"
+#include "soc15_common.h"
+#include "asic_reg/gc/gc_10_3_0_offset.h"
+#include "asic_reg/gc/gc_10_3_0_sh_mask.h"
+#include <asm/processor.h>
/*
* DO NOT use these for err/warn/info/debug messages.
@@ -59,7 +63,8 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 0),
MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 0),
- MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisableGfxOff, 0),
+ MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
+ MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 0),
MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0),
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
@@ -118,6 +123,7 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(StopDramLogging, PPSMC_MSG_StopDramLogging, 0),
MSG_MAP(SetSoftMinCclk, PPSMC_MSG_SetSoftMinCclk, 0),
MSG_MAP(SetSoftMaxCclk, PPSMC_MSG_SetSoftMaxCclk, 0),
+ MSG_MAP(RequestActiveWgp, PPSMC_MSG_RequestActiveWgp, 0),
};
static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -162,6 +168,9 @@ static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
FEA_MAP(A55_DPM),
FEA_MAP(CVIP_DSP_DPM),
FEA_MAP(MSMU_LOW_POWER),
+ FEA_MAP_REVERSE(SOCCLK),
+ FEA_MAP_REVERSE(FCLK),
+ FEA_MAP_HALF_REVERSE(GFX),
};
static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
@@ -171,6 +180,14 @@ static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
TAB_MAP_VALID(DPMCLOCKS),
};
+static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
+};
+
static int vangogh_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -242,6 +259,12 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_SOCCLK:
*value = metrics->SocclkFrequency;
break;
+ case METRICS_AVERAGE_VCLK:
+ *value = metrics->VclkFrequency;
+ break;
+ case METRICS_AVERAGE_DCLK:
+ *value = metrics->DclkFrequency;
+ break;
case METRICS_AVERAGE_UCLK:
*value = metrics->MemclkFrequency;
break;
@@ -252,7 +275,8 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->UvdActivity;
break;
case METRICS_AVERAGE_SOCKETPOWER:
- *value = metrics->CurrentSocketPower;
+ *value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
*value = metrics->GfxTemperature / 100 *
@@ -271,6 +295,10 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
case METRICS_VOLTAGE_VDDSOC:
*value = metrics->Voltage[1];
break;
+ case METRICS_AVERAGE_CPUCLK:
+ memcpy(value, &metrics->CoreFrequency[0],
+ smu->cpu_core_num * sizeof(uint16_t));
+ break;
default:
*value = UINT_MAX;
break;
@@ -307,6 +335,13 @@ static int vangogh_init_smc_tables(struct smu_context *smu)
if (ret)
return ret;
+#ifdef CONFIG_X86
+ /* AMD x86 APU only */
+ smu->cpu_core_num = boot_cpu_data.x86_max_cores;
+#else
+ smu->cpu_core_num = 4;
+#endif
+
return smu_v11_0_init_smc_tables(smu);
}
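
On x86 builds the CPU core count comes from boot_cpu_data.x86_max_cores, with a fixed fallback of 4 elsewhere; vangogh_get_smu_metrics_data() later copies that many CoreFrequency entries for METRICS_AVERAGE_CPUCLK. A user-space analogue of the same compile-time guard, purely for illustration:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
#if defined(__x86_64__) || defined(__i386__)
	/* illustrative stand-in for boot_cpu_data.x86_max_cores */
	long cores = sysconf(_SC_NPROCESSORS_CONF);
#else
	long cores = 4;	/* mirrors the driver's non-x86 fallback */
#endif
	printf("cpu_core_num = %ld\n", cores);
	return 0;
}
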
@@ -316,17 +351,13 @@ static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
- if (ret)
- return ret;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
+ if (ret)
+ return ret;
} else {
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
- if (ret)
- return ret;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
+ if (ret)
+ return ret;
}
return ret;
@@ -337,54 +368,18 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
int ret = 0;
if (enable) {
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
- if (ret)
- return ret;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
+ if (ret)
+ return ret;
} else {
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
- if (ret)
- return ret;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
+ if (ret)
+ return ret;
}
return ret;
}
-static int vangogh_get_allowed_feature_mask(struct smu_context *smu,
- uint32_t *feature_mask,
- uint32_t num)
-{
- struct amdgpu_device *adev = smu->adev;
-
- if (num > 2)
- return -EINVAL;
-
- memset(feature_mask, 0, sizeof(uint32_t) * num);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DPM_BIT)
- | FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT)
- | FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
- | FEATURE_MASK(FEATURE_PPT_BIT)
- | FEATURE_MASK(FEATURE_TDC_BIT)
- | FEATURE_MASK(FEATURE_FAN_CONTROLLER_BIT)
- | FEATURE_MASK(FEATURE_DS_LCLK_BIT)
- | FEATURE_MASK(FEATURE_DS_DCFCLK_BIT);
-
- if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT);
-
- if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT);
-
- if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
-
- return 0;
-}
-
static bool vangogh_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
@@ -402,14 +397,68 @@ static bool vangogh_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
+static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->SocClocks[dpm_level];
+ break;
+ case SMU_VCLK:
+ if (dpm_level >= clk_table->VcnClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VcnClocks[dpm_level].vclk;
+ break;
+ case SMU_DCLK:
+ if (dpm_level >= clk_table->VcnClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VcnClocks[dpm_level].dclk;
+ break;
+ case SMU_UCLK:
+ case SMU_MCLK:
+ if (dpm_level >= clk_table->NumDfPstatesEnabled)
+ return -EINVAL;
+ *freq = clk_table->DfPstateTable[dpm_level].memclk;
+
+ break;
+ case SMU_FCLK:
+ if (dpm_level >= clk_table->NumDfPstatesEnabled)
+ return -EINVAL;
+ *freq = clk_table->DfPstateTable[dpm_level].fclk;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
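
vangogh_get_dpm_clk_limited() is a bounds-checked lookup into the firmware-provided DpmClocks_t table, selecting the per-clock array by clock type. A standalone sketch of the same pattern with invented table contents:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for a firmware clock table; values are invented. */
static const uint32_t soc_clocks_mhz[] = { 400, 600, 800, 1000 };

static int get_dpm_clk_limited(uint32_t dpm_level, uint32_t *freq)
{
	if (dpm_level >= sizeof(soc_clocks_mhz) / sizeof(soc_clocks_mhz[0]))
		return -1;	/* -EINVAL in the driver */

	*freq = soc_clocks_mhz[dpm_level];
	return 0;
}

int main(void)
{
	uint32_t freq;

	if (!get_dpm_clk_limited(2, &freq))
		printf("level 2 -> %u MHz\n", freq);
	return 0;
}
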
static int vangogh_print_fine_grain_clk(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int size = 0;
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ SmuMetrics_t metrics;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int i, size = 0, ret = 0;
+ uint32_t cur_value = 0, value = 0, count = 0;
+ bool cur_value_match_level = false;
+
+ memset(&metrics, 0, sizeof(metrics));
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, false);
+ if (ret)
+ return ret;
switch (clk_type) {
case SMU_OD_SCLK:
- if (smu->od_enabled) {
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
size = sprintf(buf, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
@@ -417,20 +466,799 @@ static int vangogh_print_fine_grain_clk(struct smu_context *smu,
(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
}
break;
+ case SMU_OD_CCLK:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ size = sprintf(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sprintf(buf + size, "0: %10uMhz\n",
+ (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
+ }
+ break;
case SMU_OD_RANGE:
- if (smu->od_enabled) {
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
size = sprintf(buf, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
+ size += sprintf(buf + size, "CCLK: %7uMhz %10uMhz\n",
+ smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
}
break;
+ case SMU_SOCCLK:
+ /* socclk levels 3 ~ 6 use the same frequency on vangogh */
+ count = clk_table->NumSocClkLevelsEnabled;
+ cur_value = metrics.SocclkFrequency;
+ break;
+ case SMU_VCLK:
+ count = clk_table->VcnClkLevelsEnabled;
+ cur_value = metrics.VclkFrequency;
+ break;
+ case SMU_DCLK:
+ count = clk_table->VcnClkLevelsEnabled;
+ cur_value = metrics.DclkFrequency;
+ break;
+ case SMU_MCLK:
+ count = clk_table->NumDfPstatesEnabled;
+ cur_value = metrics.MemclkFrequency;
+ break;
+ case SMU_FCLK:
+ count = clk_table->NumDfPstatesEnabled;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
+ if (ret)
+ return ret;
+ break;
default:
break;
}
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ case SMU_MCLK:
+ case SMU_FCLK:
+ for (i = 0; i < count; i++) {
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+ if (ret)
+ return ret;
+ if (!value)
+ continue;
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ if (cur_value == value)
+ cur_value_match_level = true;
+ }
+
+ if (!cur_value_match_level)
+ size += sprintf(buf + size, " %uMhz *\n", cur_value);
+ break;
+ default:
+ break;
+ }
+
+ return size;
+}
+
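
The print path walks every enabled level, prints "index: freq", stars the entry matching the current frequency, and appends a synthetic starred line when the current value falls between table entries. A small standalone sketch of that formatting loop:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t levels[] = { 400, 600, 800 };	/* invented values */
	uint32_t cur = 700;				/* current clock, MHz */
	char buf[256];
	int size = 0;
	bool matched = false;

	for (unsigned int i = 0; i < sizeof(levels) / sizeof(levels[0]); i++) {
		size += sprintf(buf + size, "%u: %uMhz %s\n", i, levels[i],
				cur == levels[i] ? "*" : "");
		if (cur == levels[i])
			matched = true;
	}
	if (!matched)
		size += sprintf(buf + size, "   %uMhz *\n", cur);

	fputs(buf, stdout);
	return 0;
}
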
+static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *vclk_mask,
+ uint32_t *dclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *fclk_mask,
+ uint32_t *soc_mask)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ if (mclk_mask)
+ *mclk_mask = clk_table->NumDfPstatesEnabled - 1;
+
+ if (fclk_mask)
+ *fclk_mask = clk_table->NumDfPstatesEnabled - 1;
+
+ if (soc_mask)
+ *soc_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ if (mclk_mask)
+ *mclk_mask = 0;
+
+ if (fclk_mask)
+ *fclk_mask = 0;
+
+ if (soc_mask)
+ *soc_mask = 1;
+
+ if (vclk_mask)
+ *vclk_mask = 1;
+
+ if (dclk_mask)
+ *dclk_mask = 1;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) {
+ if (mclk_mask)
+ *mclk_mask = 0;
+
+ if (fclk_mask)
+ *fclk_mask = 0;
+
+ if (soc_mask)
+ *soc_mask = 1;
+
+ if (vclk_mask)
+ *vclk_mask = 1;
+
+ if (dclk_mask)
+ *dclk_mask = 1;
+ }
+
+ return 0;
+}
+
+static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu,
+ enum smu_clk_type clk_type)
+{
+ enum smu_feature_mask feature_id = 0;
+
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_FCLK:
+ feature_id = SMU_FEATURE_DPM_FCLK_BIT;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
+ break;
+ case SMU_SOCCLK:
+ feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ feature_id = SMU_FEATURE_VCN_DPM_BIT;
+ break;
+ default:
+ return true;
+ }
+
+ if (!smu_cmn_feature_is_enabled(smu, feature_id))
+ return false;
+
+ return true;
+}
+
+static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ int ret = 0;
+ uint32_t soc_mask;
+ uint32_t vclk_mask;
+ uint32_t dclk_mask;
+ uint32_t mclk_mask;
+ uint32_t fclk_mask;
+ uint32_t clock_limit;
+
+ if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_FCLK:
+ clock_limit = smu->smu_table.boot_values.fclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ case SMU_VCLK:
+ clock_limit = smu->smu_table.boot_values.vclk;
+ break;
+ case SMU_DCLK:
+ clock_limit = smu->smu_table.boot_values.dclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+ /* clock in Mhz unit */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+ if (max) {
+ ret = vangogh_get_profiling_clk_mask(smu,
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
+ &vclk_mask,
+ &dclk_mask,
+ &mclk_mask,
+ &fclk_mask,
+ &soc_mask);
+ if (ret)
+ goto failed;
+
+ switch (clk_type) {
+ case SMU_UCLK:
+ case SMU_MCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_FCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_VCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_DCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+ }
+ if (min) {
+ switch (clk_type) {
+ case SMU_UCLK:
+ case SMU_MCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_FCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_VCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_DCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+ }
+failed:
+ return ret;
+}
+
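
When a clock's DPM feature is disabled, vangogh_get_dpm_ultimate_freq() reports the VBIOS boot value for both min and max; boot clocks are kept in 10 kHz units, hence the clock_limit / 100 conversion to MHz. The arithmetic as a standalone sketch (values invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t boot_gfxclk_10khz = 160000;	/* invented: 1600 MHz in 10 kHz units */
	uint32_t mhz = boot_gfxclk_10khz / 100;	/* same conversion as clock_limit / 100 */

	printf("boot gfxclk = %u MHz\n", mhz);
	return 0;
}
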
+static int vangogh_get_power_profile_mode(struct smu_context *smu,
+ char *buf)
+{
+ static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ uint32_t i, size = 0;
+ int16_t workload_type = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /*
+ * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
+ * Not all profile modes are supported on vangogh.
+ */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ i);
+
+ if (workload_type < 0)
+ continue;
+
+ size += sprintf(buf + size, "%2d %14s%s\n",
+ i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ }
+
return size;
}
+static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+{
+ int workload_type, ret;
+ uint32_t profile_mode = input[size];
+
+ if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
+ return -EINVAL;
+ }
+
+ if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
+ profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
+ return 0;
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ profile_mode);
+ if (workload_type < 0) {
+ dev_err_once(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
+ profile_mode);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
+ 1 << workload_type,
+ NULL);
+ if (ret) {
+ dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
+ workload_type);
+ return ret;
+ }
+
+ smu->power_profile_mode = profile_mode;
+
+ return 0;
+}
+
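
vangogh_get_power_profile_mode() lists only the profiles that map to a workload bit, and vangogh_set_power_profile_mode() forwards the choice with ActiveProcessNotify using 1 << workload_type. From user space this surfaces through pp_power_profile_mode; a hedged sketch of reading the list and selecting a mode (path and index are illustrative):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/drm/card0/device/pp_power_profile_mode";
	char line[128];
	FILE *f;

	f = fopen(path, "r");
	if (f) {
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* active profile is starred */
		fclose(f);
	}

	f = fopen(path, "w");
	if (f) {
		fputs("5", f);	/* request COMPUTE; unsupported modes return -EINVAL */
		fclose(f);
	}
	return 0;
}
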
+static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret = 0;
+
+ if (!vangogh_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinGfxClk,
+ min, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxGfxClk,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinFclkByFreq,
+ min, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxFclkByFreq,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinSocclkByFreq,
+ min, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxSocclkByFreq,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_VCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min << 16, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max << 16, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_DCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
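
Note the asymmetry in the VCN limits: SMU_VCLK sends the frequency shifted left by 16 in SetHardMinVcn/SetSoftMaxVcn while SMU_DCLK sends it unshifted, i.e. the two clocks appear to share one message argument with vclk in the upper half-word. A tiny sketch of that packing as implied by the code (not taken from a firmware spec):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vclk_mhz = 1000;	/* invented values */
	uint32_t dclk_mhz = 800;

	uint32_t vclk_param = vclk_mhz << 16;	/* what the SMU_VCLK case sends */
	uint32_t dclk_param = dclk_mhz;		/* what the SMU_DCLK case sends */

	printf("vclk param = 0x%08X, dclk param = 0x%08X\n", vclk_param, dclk_param);
	return 0;
}
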
+static int vangogh_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint32_t mask)
+{
+ uint32_t soft_min_level = 0, soft_max_level = 0;
+ uint32_t min_freq = 0, max_freq = 0;
+ int ret = 0;
+
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type,
+ soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type,
+ soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxSocclkByFreq,
+ max_freq, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinSocclkByFreq,
+ min_freq, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_MCLK:
+ case SMU_FCLK:
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxFclkByFreq,
+ max_freq, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinFclkByFreq,
+ min_freq, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_VCLK:
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min_freq << 16, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max_freq << 16, NULL);
+ if (ret)
+ return ret;
+
+ break;
+ case SMU_DCLK:
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min_freq, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max_freq, NULL);
+ if (ret)
+ return ret;
+
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq, force_freq;
+ enum smu_clk_type clk_type;
+
+ enum smu_clk_type clks[] = {
+ SMU_SOCCLK,
+ SMU_VCLK,
+ SMU_DCLK,
+ SMU_MCLK,
+ SMU_FCLK,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ clk_type = clks[i];
+ ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
+ if (ret)
+ return ret;
+
+ force_freq = highest ? max_freq : min_freq;
+ ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int vangogh_unforce_dpm_levels(struct smu_context *smu)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq;
+ enum smu_clk_type clk_type;
+
+ struct clk_feature_map {
+ enum smu_clk_type clk_type;
+ uint32_t feature;
+ } clk_feature_map[] = {
+ {SMU_MCLK, SMU_FEATURE_DPM_FCLK_BIT},
+ {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT},
+ {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
+ {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT},
+ {SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
+
+ if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature))
+ continue;
+
+ clk_type = clk_feature_map[i].clk_type;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
+
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
+{
+ int ret = 0;
+ uint32_t socclk_freq = 0, fclk_freq = 0;
+ uint32_t vclk_freq = 0, dclk_freq = 0;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int vangogh_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ uint32_t soc_mask, mclk_mask, fclk_mask;
+ uint32_t vclk_mask = 0, dclk_mask = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = vangogh_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = vangogh_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = vangogh_unforce_dpm_levels(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinGfxClk,
+ VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxGfxClk,
+ VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_profiling_clk_mask(smu, level,
+ &vclk_mask,
+ &dclk_mask,
+ &mclk_mask,
+ &fclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+
+ vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+ vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
+ vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
+ vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
+ vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
+
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn,
+ VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn,
+ VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
+ if (ret)
+ return ret;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = vangogh_get_profiling_clk_mask(smu, level,
+ NULL,
+ NULL,
+ &mclk_mask,
+ &fclk_mask,
+ NULL);
+ if (ret)
+ return ret;
+
+ vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+ vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_peak_clock_by_device(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+ return ret;
+}
+
static int vangogh_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
@@ -492,6 +1320,12 @@ static int vangogh_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_CPU_CLK:
+ ret = vangogh_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_CPUCLK,
+ (uint32_t *)data);
+ *size = smu->cpu_core_num * sizeof(uint16_t);
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -513,7 +1347,7 @@ static int vangogh_set_watermarks_table(struct smu_context *smu,
if (clock_ranges) {
if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
- clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
+ clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
return -EINVAL;
for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
@@ -613,16 +1447,46 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
}
static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
- long input[], uint32_t size)
+ long input[], uint32_t size)
{
int ret = 0;
+ int i;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu->od_enabled) {
+ if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
dev_warn(smu->adev->dev, "Fine grain is not enabled!\n");
return -EINVAL;
}
switch (type) {
+ case PP_OD_EDIT_CCLK_VDDC_TABLE:
+ if (size != 3) {
+ dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n");
+ return -EINVAL;
+ }
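+ /* input[0] = CPU core index, input[1] = 0 (min) or 1 (max), input[2] = cclk in MHz */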
+ if (input[0] >= smu->cpu_core_num) {
+ dev_err(smu->adev->dev, "core index is out of range, should be less than %d\n",
+ smu->cpu_core_num);
+ return -EINVAL;
+ }
+ smu->cpu_core_id_select = input[0];
+ if (input[1] == 0) {
+ if (input[2] < smu->cpu_default_soft_min_freq) {
+ dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[2], smu->cpu_default_soft_min_freq);
+ return -EINVAL;
+ }
+ smu->cpu_actual_soft_min_freq = input[2];
+ } else if (input[1] == 1) {
+ if (input[2] > smu->cpu_default_soft_max_freq) {
+ dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[2], smu->cpu_default_soft_max_freq);
+ return -EINVAL;
+ }
+ smu->cpu_actual_soft_max_freq = input[2];
+ } else {
+ return -EINVAL;
+ }
+ break;
case PP_OD_EDIT_SCLK_VDDC_TABLE:
if (size != 2) {
dev_err(smu->adev->dev, "Input parameter number not correct\n");
@@ -631,14 +1495,16 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
if (input[0] == 0) {
if (input[1] < smu->gfx_default_hard_min_freq) {
- dev_warn(smu->adev->dev, "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ dev_warn(smu->adev->dev,
+ "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
input[1], smu->gfx_default_hard_min_freq);
return -EINVAL;
}
smu->gfx_actual_hard_min_freq = input[1];
} else if (input[0] == 1) {
if (input[1] > smu->gfx_default_soft_max_freq) {
- dev_warn(smu->adev->dev, "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ dev_warn(smu->adev->dev,
+ "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
input[1], smu->gfx_default_soft_max_freq);
return -EINVAL;
}
@@ -654,6 +1520,8 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
} else {
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
smu->gfx_actual_hard_min_freq, NULL);
@@ -668,6 +1536,29 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
return ret;
}
+
+ if (smu->adev->pm.fw_version < 0x43f1b00) {
+ dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
+ break;
+ }
+
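+ /* the Cclk messages appear to encode the target core index in bits 20+ of the parameter */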
+ for (i = 0; i < smu->cpu_core_num; i++) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
+ (i << 20) | smu->cpu_actual_soft_min_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set hard min cclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
+ (i << 20) | smu->cpu_actual_soft_max_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set soft max cclk failed!");
+ return ret;
+ }
+ }
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -676,8 +1567,10 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
return -EINVAL;
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
- dev_err(smu->adev->dev, "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
- smu->gfx_actual_hard_min_freq, smu->gfx_actual_soft_max_freq);
+ dev_err(smu->adev->dev,
+ "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu->gfx_actual_hard_min_freq,
+ smu->gfx_actual_soft_max_freq);
return -EINVAL;
}
@@ -694,6 +1587,29 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
dev_err(smu->adev->dev, "Set soft max sclk failed!");
return ret;
}
+
+ if (smu->adev->pm.fw_version < 0x43f1b00) {
+ dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
+ break;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
+ ((smu->cpu_core_id_select << 20)
+ | smu->cpu_actual_soft_min_freq),
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set hard min cclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
+ ((smu->cpu_core_id_select << 20)
+ | smu->cpu_actual_soft_max_freq),
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set soft max cclk failed!");
+ return ret;
+ }
}
break;
default:
@@ -719,18 +1635,133 @@ static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
smu->gfx_actual_hard_min_freq = 0;
smu->gfx_actual_soft_max_freq = 0;
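+ /* default CPU soft clock limits, in MHz */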
+ smu->cpu_default_soft_min_freq = 1400;
+ smu->cpu_default_soft_max_freq = 3500;
+ smu->cpu_actual_soft_min_freq = 0;
+ smu->cpu_actual_soft_max_freq = 0;
+
+ return 0;
+}
+
+static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t *table = smu->smu_table.clocks_table;
+ int i;
+
+ if (!clock_table || !table)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
+ clock_table->SocClocks[i].Freq = table->SocClocks[i];
+ clock_table->SocClocks[i].Vol = table->SocVoltage[i];
+ }
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk;
+ clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage;
+ }
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk;
+ clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage;
+ }
+
return 0;
}
+
static int vangogh_system_features_control(struct smu_context *smu, bool en)
{
struct amdgpu_device *adev = smu->adev;
+ struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_mask[2];
+ int ret = 0;
if (adev->pm.fw_version >= 0x43f1700)
- return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
- en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL);
- else
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
+ en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL);
+
+ bitmap_zero(feature->enabled, feature->feature_num);
+ bitmap_zero(feature->supported, feature->feature_num);
+
+ if (!en)
+ return ret;
+
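+ /* refresh the cached enabled/supported bitmaps from the two 32-bit feature masks reported by the SMU */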
+ ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+ if (ret)
+ return ret;
+
+ bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
+ feature->feature_num);
+ bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
+ feature->feature_num);
+
+ return 0;
+}
+
+static int vangogh_post_smu_init(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t tmp;
+ int ret = 0;
+ uint8_t aon_bits = 0;
+ /* Two CUs in one WGP */
+ uint32_t req_active_wgps = adev->gfx.cu_info.number/2;
+ uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
+ adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
+
+ /* the allow message is sent after the enable message on Vangogh */
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Failed to Enable GfxOff!\n");
+ return ret;
+ }
+
+ /* if all CUs are active, no need to power off any WGPs */
+ if (total_cu == adev->gfx.cu_info.number)
return 0;
+
+ /*
+ * Calculate the total bits number of always on WGPs for all SA/SEs in
+ * RLC_PG_ALWAYS_ON_WGP_MASK.
+ */
+ tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK));
+ tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK;
+
+ aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
+
+ /* Do not request any WGPs less than set in the AON_WGP_MASK */
+ if (aon_bits > req_active_wgps) {
+ dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n");
+ return 0;
+ } else {
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL);
+ }
+}
+
+static int vangogh_mode_reset(struct smu_context *smu, int type)
+{
+ int ret = 0, index = 0;
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_GfxDeviceDriverReset);
+ if (index < 0)
+ return index == -EACCES ? 0 : index;
+
+ mutex_lock(&smu->message_lock);
+
+ ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);
+
+ mutex_unlock(&smu->message_lock);
+
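+ /* short delay to let the reset settle before returning (assumed; there is no completion handshake here) */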
+ mdelay(10);
+
+ return ret;
+}
+
+static int vangogh_mode2_reset(struct smu_context *smu)
+{
+ return vangogh_mode_reset(smu, SMU_RESET_MODE_2);
}
static const struct pptable_funcs vangogh_ppt_funcs = {
@@ -742,7 +1773,6 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.init_power = smu_v11_0_init_power,
.fini_power = smu_v11_0_fini_power,
.register_irq_handler = smu_v11_0_register_irq_handler,
- .get_allowed_feature_mask = vangogh_get_allowed_feature_mask,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
.send_smc_msg = smu_cmn_send_smc_msg,
@@ -761,6 +1791,15 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.set_default_dpm_table = vangogh_set_default_dpm_tables,
.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
.system_features_control = vangogh_system_features_control,
+ .feature_is_enabled = smu_cmn_feature_is_enabled,
+ .set_power_profile_mode = vangogh_set_power_profile_mode,
+ .get_power_profile_mode = vangogh_get_power_profile_mode,
+ .get_dpm_clock_table = vangogh_get_dpm_clock_table,
+ .force_clk_levels = vangogh_force_clk_levels,
+ .set_performance_level = vangogh_set_performance_level,
+ .post_init = vangogh_post_smu_init,
+ .mode2_reset = vangogh_mode2_reset,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
};
void vangogh_set_ppt_funcs(struct smu_context *smu)
@@ -769,5 +1808,6 @@ void vangogh_set_ppt_funcs(struct smu_context *smu)
smu->message_map = vangogh_message_map;
smu->feature_map = vangogh_feature_mask_map;
smu->table_map = vangogh_table_map;
+ smu->workload_map = vangogh_workload_map;
smu->is_apu = true;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
index eab455493076..c56d4583dc72 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
@@ -28,9 +28,29 @@
extern void vangogh_set_ppt_funcs(struct smu_context *smu);
/* UMD PState Vangogh Msg Parameters in MHz */
-#define VANGOGH_UMD_PSTATE_GFXCLK 700
-#define VANGOGH_UMD_PSTATE_SOCCLK 678
-#define VANGOGH_UMD_PSTATE_FCLK 800
+#define VANGOGH_UMD_PSTATE_STANDARD_GFXCLK 1100
+#define VANGOGH_UMD_PSTATE_STANDARD_SOCCLK 600
+#define VANGOGH_UMD_PSTATE_STANDARD_FCLK 800
+#define VANGOGH_UMD_PSTATE_STANDARD_VCLK 705
+#define VANGOGH_UMD_PSTATE_STANDARD_DCLK 600
+
+#define VANGOGH_UMD_PSTATE_PEAK_GFXCLK 1300
+#define VANGOGH_UMD_PSTATE_PEAK_SOCCLK 600
+#define VANGOGH_UMD_PSTATE_PEAK_FCLK 800
+#define VANGOGH_UMD_PSTATE_PEAK_VCLK 705
+#define VANGOGH_UMD_PSTATE_PEAK_DCLK 600
+
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_GFXCLK 400
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_SOCCLK 1000
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_FCLK 800
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_VCLK 1000
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_DCLK 800
+
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_GFXCLK 1100
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_SOCCLK 1000
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_FCLK 400
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_VCLK 1000
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_DCLK 800
/* RLC Power Status */
#define RLC_STATUS_OFF 0
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index dc75db8af371..185698bb0027 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -188,6 +188,7 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
return -EINVAL;
*freq = clk_table->SocClocks[dpm_level].Freq;
break;
+ case SMU_UCLK:
case SMU_MCLK:
if (dpm_level >= NUM_FCLK_DPM_LEVELS)
return -EINVAL;
@@ -343,12 +344,141 @@ failed:
return ret;
}
+static int renoir_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ int ret = 0;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
+ dev_warn(smu->adev->dev, "Fine grain is not enabled!\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (input[0] == 0) {
+ if (input[1] < smu->gfx_default_hard_min_freq) {
+ dev_warn(smu->adev->dev,
+ "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[1], smu->gfx_default_hard_min_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_hard_min_freq = input[1];
+ } else if (input[0] == 1) {
+ if (input[1] > smu->gfx_default_soft_max_freq) {
+ dev_warn(smu->adev->dev,
+ "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[1], smu->gfx_default_soft_max_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_soft_max_freq = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinGfxClk,
+ smu->gfx_actual_hard_min_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Restore the default hard min sclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxGfxClk,
+ smu->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
+ return ret;
+ }
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ } else {
+ if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
+ dev_err(smu->adev->dev,
+ "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu->gfx_actual_hard_min_freq,
+ smu->gfx_actual_soft_max_freq);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinGfxClk,
+ smu->gfx_actual_hard_min_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set hard min sclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxGfxClk,
+ smu->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set soft max sclk failed!");
+ return ret;
+ }
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
+static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+ uint32_t min = 0, max = 0;
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMinGfxclkFrequency,
+ 0, &min);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMaxGfxclkFrequency,
+ 0, &max);
+ if (ret)
+ return ret;
+
+ smu->gfx_default_hard_min_freq = min;
+ smu->gfx_default_soft_max_freq = max;
+ smu->gfx_actual_hard_min_freq = 0;
+ smu->gfx_actual_soft_max_freq = 0;
+
+ return 0;
+}
+
static int renoir_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
SmuMetrics_t metrics;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
bool cur_value_match_level = false;
memset(&metrics, 0, sizeof(metrics));
@@ -358,6 +488,30 @@ static int renoir_print_clk_levels(struct smu_context *smu,
return ret;
switch (clk_type) {
+ case SMU_OD_RANGE:
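+ /* OD information is only reported while the manual performance level is selected */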
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMinGfxclkFrequency,
+ 0, &min);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMaxGfxclkFrequency,
+ 0, &max);
+ if (ret)
+ return ret;
+ size += sprintf(buf + size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
+ }
+ break;
+ case SMU_OD_SCLK:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+ max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+ size += sprintf(buf + size, "OD_SCLK\n");
+ size += sprintf(buf + size, "0:%10uMhz\n", min);
+ size += sprintf(buf + size, "1:%10uMhz\n", max);
+ }
+ break;
case SMU_GFXCLK:
case SMU_SCLK:
/* retrieve table returned parameters, unit is MHz */
@@ -398,23 +552,35 @@ static int renoir_print_clk_levels(struct smu_context *smu,
cur_value = metrics.ClockFrequency[CLOCK_FCLK];
break;
default:
- return -EINVAL;
+ break;
}
- for (i = 0; i < count; i++) {
- ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
- if (ret)
- return ret;
- if (!value)
- continue;
- size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
- cur_value == value ? "*" : "");
- if (cur_value == value)
- cur_value_match_level = true;
- }
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ case SMU_SOCCLK:
+ case SMU_MCLK:
+ case SMU_DCEFCLK:
+ case SMU_FCLK:
+ for (i = 0; i < count; i++) {
+ ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
+ if (ret)
+ return ret;
+ if (!value)
+ continue;
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ if (cur_value == value)
+ cur_value_match_level = true;
+ }
+
+ if (!cur_value_match_level)
+ size += sprintf(buf + size, " %uMhz *\n", cur_value);
- if (!cur_value_match_level)
- size += sprintf(buf + size, " %uMhz *\n", cur_value);
+ break;
+ default:
+ break;
+ }
return size;
}
@@ -666,6 +832,10 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
return -EINVAL;
}
+ if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
+ profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
+ return 0;
+
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
@@ -724,15 +894,27 @@ static int renoir_set_performance_level(struct smu_context *smu,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = renoir_force_dpm_limit_value(smu, true);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = renoir_force_dpm_limit_value(smu, false);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = renoir_unforce_dpm_levels(smu);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinGfxClk,
RENOIR_UMD_PSTATE_GFXCLK,
@@ -785,6 +967,9 @@ static int renoir_set_performance_level(struct smu_context *smu,
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = renoir_get_profiling_clk_mask(smu, level,
&sclk_mask,
&mclk_mask,
@@ -796,6 +981,9 @@ static int renoir_set_performance_level(struct smu_context *smu,
renoir_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = renoir_set_peak_clock_by_device(smu);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
@@ -1120,7 +1308,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state)
{
- return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GpuChangeState, state, NULL);
+ return 0;
}
static const struct pptable_funcs renoir_ppt_funcs = {
@@ -1159,6 +1347,8 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = renoir_get_gpu_metrics,
.gfx_state_change_set = renoir_gfx_state_change_set,
+ .set_fine_grain_gfx_freq_parameters = renoir_set_fine_grain_gfx_freq_parameters,
+ .od_edit_dpm_table = renoir_od_edit_dpm_table,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 522d55004655..06abf2a7ce9e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -225,6 +225,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
break;
case SMU_FCLK:
case SMU_MCLK:
+ case SMU_UCLK:
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index f8260769061c..e4eff6d9f092 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -68,14 +68,6 @@ static const char *smu_get_message_name(struct smu_context *smu,
return __smu_message_names[type];
}
-static void smu_cmn_send_msg_without_waiting(struct smu_context *smu,
- uint16_t msg)
-{
- struct amdgpu_device *adev = smu->adev;
-
- WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
-}
-
static void smu_cmn_read_arg(struct smu_context *smu,
uint32_t *arg)
{
@@ -92,7 +84,7 @@ static int smu_cmn_wait_for_response(struct smu_context *smu)
for (i = 0; i < timeout; i++) {
cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
- return cur_value == 0x1 ? 0 : -EIO;
+ return cur_value;
udelay(1);
}
@@ -101,7 +93,29 @@ static int smu_cmn_wait_for_response(struct smu_context *smu)
if (i == timeout)
return -ETIME;
- return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+ return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
+}
+
+int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg, uint32_t param)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+
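+ /* the response register reads 0x1 once the previous message has completed successfully (assumption based on the checks below) */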
+ ret = smu_cmn_wait_for_response(smu);
+ if (ret != 0x1) {
+ dev_err(adev->dev, "Msg issuing pre-check failed and "
+ "SMU may be not in the right state!\n");
+ if (ret != -ETIME)
+ ret = -EIO;
+ return ret;
+ }
+
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+
+ return 0;
}
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
@@ -122,29 +136,23 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
return index == -EACCES ? 0 : index;
mutex_lock(&smu->message_lock);
- ret = smu_cmn_wait_for_response(smu);
- if (ret) {
- dev_err(adev->dev, "Msg issuing pre-check failed and "
- "SMU may be not in the right state!\n");
+ ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, param);
+ if (ret)
goto out;
- }
-
- WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
-
- smu_cmn_send_msg_without_waiting(smu, (uint16_t)index);
ret = smu_cmn_wait_for_response(smu);
- if (ret) {
+ if (ret != 0x1) {
dev_err(adev->dev, "failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
- smu_get_message_name(smu, msg), index, param, ret);
+ smu_get_message_name(smu, msg), index, param, ret);
+ if (ret != -ETIME)
+ ret = -EIO;
goto out;
}
if (read_arg)
smu_cmn_read_arg(smu, read_arg);
+ ret = 0; /* 0 as driver return value */
out:
mutex_unlock(&smu->message_lock);
return ret;
@@ -269,11 +277,13 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu,
enum smu_feature_mask mask)
{
struct smu_feature *feature = &smu->smu_feature;
+ struct amdgpu_device *adev = smu->adev;
int feature_id;
int ret = 0;
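+ /* APUs prior to Vangogh do not report a feature mask, so every feature is treated as enabled (assumption) */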
- if (smu->is_apu)
+ if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
return 1;
+
feature_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_FEATURE,
mask);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 01e825d83d8d..08ccf2d3257c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -26,6 +26,8 @@
#include "amdgpu_smu.h"
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)
+int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg, uint32_t param);
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
uint32_t param,
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 042d7b54a6de..895cdd991af6 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -162,15 +162,10 @@ static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
.atomic_update = arc_pgu_plane_atomic_update,
};
-static void arc_pgu_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
-}
-
static const struct drm_plane_funcs arc_pgu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = arc_pgu_plane_destroy,
+ .destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -213,7 +208,7 @@ int arc_pgu_setup_crtc(struct drm_device *drm)
ret = drm_crtc_init_with_planes(drm, &arcpgu->crtc, primary, NULL,
&arc_pgu_crtc_funcs, NULL);
if (ret) {
- arc_pgu_plane_destroy(primary);
+ drm_plane_cleanup(primary);
return ret;
}
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index f164818ec477..077d006b1fbf 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -145,7 +145,7 @@ static void arcpgu_debugfs_init(struct drm_minor *minor)
}
#endif
-static struct drm_driver arcpgu_drm_driver = {
+static const struct drm_driver arcpgu_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "arcpgu",
.desc = "ARC PGU Controller",
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 4b485eb512e2..59172acb9738 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -550,7 +550,6 @@ static void komeda_crtc_vblank_disable(struct drm_crtc *crtc)
}
static const struct drm_crtc_funcs komeda_crtc_funcs = {
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 108e7a31bd26..494075ddbef6 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -510,7 +510,6 @@ static void malidp_crtc_disable_vblank(struct drm_crtc *crtc)
}
static const struct drm_crtc_funcs malidp_crtc_funcs = {
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 3ebcf5a52c8b..b7bb90ae787f 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -820,7 +820,6 @@ static const struct drm_crtc_funcs armada_crtc_funcs = {
.cursor_set = armada_drm_crtc_cursor_set,
.cursor_move = armada_drm_crtc_cursor_move,
.destroy = armada_drm_crtc_destroy,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c
index 742d43a7edf4..fac1ee79c372 100644
--- a/drivers/gpu/drm/ast/ast_cursor.c
+++ b/drivers/gpu/drm/ast/ast_cursor.c
@@ -39,7 +39,6 @@ static void ast_cursor_fini(struct ast_private *ast)
for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
gbo = ast->cursor.gbo[i];
- drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]);
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
}
@@ -53,14 +52,13 @@ static void ast_cursor_release(struct drm_device *dev, void *ptr)
}
/*
- * Allocate cursor BOs and pins them at the end of VRAM.
+ * Allocate cursor BOs and pin them at the end of VRAM.
*/
int ast_cursor_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
size_t size, i;
struct drm_gem_vram_object *gbo;
- struct dma_buf_map map;
int ret;
size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
@@ -77,15 +75,7 @@ int ast_cursor_init(struct ast_private *ast)
drm_gem_vram_put(gbo);
goto err_drm_gem_vram_put;
}
- ret = drm_gem_vram_vmap(gbo, &map);
- if (ret) {
- drm_gem_vram_unpin(gbo);
- drm_gem_vram_put(gbo);
- goto err_drm_gem_vram_put;
- }
-
ast->cursor.gbo[i] = gbo;
- ast->cursor.map[i] = map;
}
return drmm_add_action_or_reset(dev, ast_cursor_release, NULL);
@@ -94,7 +84,6 @@ err_drm_gem_vram_put:
while (i) {
--i;
gbo = ast->cursor.gbo[i];
- drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]);
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
}
@@ -168,38 +157,37 @@ static void update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int h
int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb)
{
struct drm_device *dev = &ast->base;
- struct drm_gem_vram_object *gbo;
- struct dma_buf_map map;
- int ret;
- void *src;
+ struct drm_gem_vram_object *dst_gbo = ast->cursor.gbo[ast->cursor.next_index];
+ struct drm_gem_vram_object *src_gbo = drm_gem_vram_of_gem(fb->obj[0]);
+ struct dma_buf_map src_map, dst_map;
void __iomem *dst;
+ void *src;
+ int ret;
if (drm_WARN_ON_ONCE(dev, fb->width > AST_MAX_HWC_WIDTH) ||
drm_WARN_ON_ONCE(dev, fb->height > AST_MAX_HWC_HEIGHT))
return -EINVAL;
- gbo = drm_gem_vram_of_gem(fb->obj[0]);
-
- ret = drm_gem_vram_pin(gbo, 0);
+ ret = drm_gem_vram_vmap(src_gbo, &src_map);
if (ret)
return ret;
- ret = drm_gem_vram_vmap(gbo, &map);
- if (ret)
- goto err_drm_gem_vram_unpin;
- src = map.vaddr; /* TODO: Use mapping abstraction properly */
+ src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
- dst = ast->cursor.map[ast->cursor.next_index].vaddr_iomem;
+ ret = drm_gem_vram_vmap(dst_gbo, &dst_map);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
+ dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
/* do data transfer to cursor BO */
update_cursor_image(dst, src, fb->width, fb->height);
- drm_gem_vram_vunmap(gbo, &map);
- drm_gem_vram_unpin(gbo);
+ drm_gem_vram_vunmap(dst_gbo, &dst_map);
+ drm_gem_vram_vunmap(src_gbo, &src_map);
return 0;
-err_drm_gem_vram_unpin:
- drm_gem_vram_unpin(gbo);
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(src_gbo, &src_map);
return ret;
}
@@ -251,17 +239,26 @@ static void ast_cursor_set_location(struct ast_private *ast, u16 x, u16 y,
void ast_cursor_show(struct ast_private *ast, int x, int y,
unsigned int offset_x, unsigned int offset_y)
{
+ struct drm_device *dev = &ast->base;
+ struct drm_gem_vram_object *gbo = ast->cursor.gbo[ast->cursor.next_index];
+ struct dma_buf_map map;
u8 x_offset, y_offset;
u8 __iomem *dst;
u8 __iomem *sig;
u8 jreg;
+ int ret;
- dst = ast->cursor.map[ast->cursor.next_index].vaddr;
+ ret = drm_gem_vram_vmap(gbo, &map);
+ if (drm_WARN_ONCE(dev, ret, "drm_gem_vram_vmap() failed, ret=%d\n", ret))
+ return;
+ dst = map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
sig = dst + AST_HWC_SIZE;
writel(x, sig + AST_HWC_SIGNATURE_X);
writel(y, sig + AST_HWC_SIGNATURE_Y);
+ drm_gem_vram_vunmap(gbo, &map);
+
if (x < 0) {
x_offset = (-x) + offset_x;
x = 0;
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 667b450606ef..ea8164e7a6dc 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -147,7 +147,7 @@ static int ast_drm_freeze(struct drm_device *dev)
error = drm_mode_config_helper_suspend(dev);
if (error)
return error;
- pci_save_state(dev->pdev);
+ pci_save_state(to_pci_dev(dev->dev));
return 0;
}
@@ -162,7 +162,7 @@ static int ast_drm_resume(struct drm_device *dev)
{
int ret;
- if (pci_enable_device(dev->pdev))
+ if (pci_enable_device(to_pci_dev(dev->dev)))
return -EIO;
ret = ast_drm_thaw(dev);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index ccaff81924ee..f871fc36c2f7 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -28,7 +28,6 @@
#ifndef __AST_DRV_H__
#define __AST_DRV_H__
-#include <linux/dma-buf-map.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/io.h>
@@ -133,7 +132,6 @@ struct ast_private {
struct {
struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM];
- struct dma_buf_map map[AST_DEFAULT_HWC_NUM];
unsigned int next_index;
} cursor;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 1b13199858cb..0ac3c2039c4b 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -67,8 +67,9 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
{
- struct device_node *np = dev->pdev->dev.of_node;
+ struct device_node *np = dev->dev->of_node;
struct ast_private *ast = to_ast_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t data, jregd0, jregd1;
/* Defaults */
@@ -85,7 +86,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
}
/* Not all families have a P2A bridge */
- if (dev->pdev->device != PCI_CHIP_AST2000)
+ if (pdev->device != PCI_CHIP_AST2000)
return;
/*
@@ -119,6 +120,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
static int ast_detect_chip(struct drm_device *dev, bool *need_post)
{
struct ast_private *ast = to_ast_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t jreg, scu_rev;
/*
@@ -143,19 +145,19 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast_detect_config_mode(dev, &scu_rev);
/* Identify chipset */
- if (dev->pdev->revision >= 0x50) {
+ if (pdev->revision >= 0x50) {
ast->chip = AST2600;
drm_info(dev, "AST 2600 detected\n");
- } else if (dev->pdev->revision >= 0x40) {
+ } else if (pdev->revision >= 0x40) {
ast->chip = AST2500;
drm_info(dev, "AST 2500 detected\n");
- } else if (dev->pdev->revision >= 0x30) {
+ } else if (pdev->revision >= 0x30) {
ast->chip = AST2400;
drm_info(dev, "AST 2400 detected\n");
- } else if (dev->pdev->revision >= 0x20) {
+ } else if (pdev->revision >= 0x20) {
ast->chip = AST2300;
drm_info(dev, "AST 2300 detected\n");
- } else if (dev->pdev->revision >= 0x10) {
+ } else if (pdev->revision >= 0x10) {
switch (scu_rev & 0x0300) {
case 0x0200:
ast->chip = AST1100;
@@ -265,7 +267,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
static int ast_get_dram_info(struct drm_device *dev)
{
- struct device_node *np = dev->pdev->dev.of_node;
+ struct device_node *np = dev->dev->of_node;
struct ast_private *ast = to_ast_private(dev);
uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
uint32_t denum, num, div, ref_pll, dsel;
@@ -409,10 +411,9 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
return ast;
dev = &ast->base;
- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
- ast->regs = pci_iomap(dev->pdev, 1, 0);
+ ast->regs = pci_iomap(pdev, 1, 0);
if (!ast->regs)
return ERR_PTR(-EIO);
@@ -421,14 +422,14 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
* assume the chip has MMIO enabled by default (rev 0x20
* and higher).
*/
- if (!(pci_resource_flags(dev->pdev, 2) & IORESOURCE_IO)) {
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
drm_info(dev, "platform has no IO space, trying MMIO\n");
ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
}
/* "map" IO regs if the above hasn't done so already */
if (!ast->ioregs) {
- ast->ioregs = pci_iomap(dev->pdev, 2, 0);
+ ast->ioregs = pci_iomap(pdev, 2, 0);
if (!ast->ioregs)
return ERR_PTR(-EIO);
}
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 8392ebde504b..7592f1b9e1f1 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -77,31 +77,32 @@ static u32 ast_get_vram_size(struct ast_private *ast)
static void ast_mm_release(struct drm_device *dev, void *ptr)
{
struct ast_private *ast = to_ast_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
arch_phys_wc_del(ast->fb_mtrr);
- arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
+ arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
}
int ast_mm_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 vram_size;
int ret;
vram_size = ast_get_vram_size(ast);
- ret = drmm_vram_helper_init(dev, pci_resource_start(dev->pdev, 0),
- vram_size);
+ ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), vram_size);
if (ret) {
drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
return ret;
}
- arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
- ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
+ arch_io_reserve_memtype_wc(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
return drmm_add_action_or_reset(dev, ast_mm_release, NULL);
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 9db371f4054f..988b270fea5e 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -903,7 +903,6 @@ static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc,
static const struct drm_crtc_funcs ast_crtc_funcs = {
.reset = ast_crtc_reset,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -1107,6 +1106,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
int ast_mode_config_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
ret = ast_cursor_init(ast);
@@ -1122,7 +1122,7 @@ int ast_mode_config_init(struct ast_private *ast)
dev->mode_config.min_height = 0;
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
+ dev->mode_config.fb_base = pci_resource_start(pdev, 0);
if (ast->chip == AST2100 ||
ast->chip == AST2200 ||
@@ -1259,7 +1259,7 @@ static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 8902c2f84bf9..0607658dde51 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -71,6 +71,7 @@ static void
ast_set_def_ext_reg(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 i, index, reg;
const u8 *ext_reg_info;
@@ -80,7 +81,7 @@ ast_set_def_ext_reg(struct drm_device *dev)
if (ast->chip == AST2300 || ast->chip == AST2400 ||
ast->chip == AST2500) {
- if (dev->pdev->revision >= 0x20)
+ if (pdev->revision >= 0x20)
ext_reg_info = extreginfo_ast2300;
else
ext_reg_info = extreginfo_ast2300a0;
@@ -366,11 +367,12 @@ static void ast_init_dram_reg(struct drm_device *dev)
void ast_post_gpu(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 reg;
- pci_read_config_dword(dev->pdev, 0x04, &reg);
+ pci_read_config_dword(pdev, 0x04, &reg);
reg |= 0x3;
- pci_write_config_dword(dev->pdev, 0x04, reg);
+ pci_write_config_dword(pdev, 0x04, reg);
ast_enable_vga(dev);
ast_open_key(ast);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index c58fa00b4848..05ad75d155e8 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -473,7 +473,6 @@ static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = {
.atomic_destroy_state = atmel_hlcdc_crtc_destroy_state,
.enable_vblank = atmel_hlcdc_crtc_enable_vblank,
.disable_vblank = atmel_hlcdc_crtc_disable_vblank,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
int atmel_hlcdc_crtc_create(struct drm_device *dev)
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index fd454225fd19..b469624fe40d 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -121,7 +121,6 @@ static int bochs_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_free_dev;
- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
ret = bochs_load(dev);
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index dce4672e3fc8..2d7380a9890e 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -110,7 +110,7 @@ int bochs_hw_load_edid(struct bochs_device *bochs)
int bochs_hw_init(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned long addr, size, mem, ioaddr, iosize;
u16 id;
@@ -201,7 +201,7 @@ void bochs_hw_fini(struct drm_device *dev)
release_region(VBE_DISPI_IOPORT_INDEX, 2);
if (bochs->fb_map)
iounmap(bochs->fb_map);
- pci_release_regions(dev->pdev);
+ pci_release_regions(to_pci_dev(dev->dev));
kfree(bochs->edid);
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index a0d392c338da..76555ae64e9c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1292,8 +1292,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
err_unregister_cec:
i2c_unregister_device(adv7511->i2c_cec);
- if (adv7511->cec_clk)
- clk_disable_unprepare(adv7511->cec_clk);
+ clk_disable_unprepare(adv7511->cec_clk);
err_i2c_unregister_packet:
i2c_unregister_device(adv7511->i2c_packet);
err_i2c_unregister_edid:
@@ -1311,8 +1310,7 @@ static int adv7511_remove(struct i2c_client *i2c)
if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_detach_dsi(adv7511);
i2c_unregister_device(adv7511->i2c_cec);
- if (adv7511->cec_clk)
- clk_disable_unprepare(adv7511->cec_clk);
+ clk_disable_unprepare(adv7511->cec_clk);
adv7511_uninit_regulators(adv7511);
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 4d278573cdb9..05eb759da6fc 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
@@ -20,6 +21,8 @@ struct display_connector {
struct gpio_desc *hpd_gpio;
int hpd_irq;
+
+ struct regulator *dp_pwr;
};
static inline struct display_connector *
@@ -172,11 +175,12 @@ static int display_connector_probe(struct platform_device *pdev)
of_property_read_string(pdev->dev.of_node, "label", &label);
/*
- * Get the HPD GPIO for DVI and HDMI connectors. If the GPIO can provide
+ * Get the HPD GPIO for DVI, HDMI and DP connectors. If the GPIO can provide
* edge interrupts, register an interrupt handler.
*/
if (type == DRM_MODE_CONNECTOR_DVII ||
- type == DRM_MODE_CONNECTOR_HDMIA) {
+ type == DRM_MODE_CONNECTOR_HDMIA ||
+ type == DRM_MODE_CONNECTOR_DisplayPort) {
conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd",
GPIOD_IN);
if (IS_ERR(conn->hpd_gpio)) {
@@ -223,6 +227,38 @@ static int display_connector_probe(struct platform_device *pdev)
}
}
+ /* Get the DP PWR for DP connector. */
+ if (type == DRM_MODE_CONNECTOR_DisplayPort) {
+ int ret;
+
+ conn->dp_pwr = devm_regulator_get_optional(&pdev->dev, "dp-pwr");
+
+ if (IS_ERR(conn->dp_pwr)) {
+ ret = PTR_ERR(conn->dp_pwr);
+
+ switch (ret) {
+ case -ENODEV:
+ conn->dp_pwr = NULL;
+ break;
+
+ case -EPROBE_DEFER:
+ return -EPROBE_DEFER;
+
+ default:
+ dev_err(&pdev->dev, "failed to get DP PWR regulator: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (conn->dp_pwr) {
+ ret = regulator_enable(conn->dp_pwr);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable DP PWR regulator: %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
conn->bridge.funcs = &display_connector_bridge_funcs;
conn->bridge.of_node = pdev->dev.of_node;
@@ -251,6 +287,9 @@ static int display_connector_remove(struct platform_device *pdev)
{
struct display_connector *conn = platform_get_drvdata(pdev);
+ if (conn->dp_pwr)
+ regulator_disable(conn->dp_pwr);
+
drm_bridge_remove(&conn->bridge);
if (!IS_ERR(conn->bridge.ddc))
@@ -275,6 +314,9 @@ static const struct of_device_id display_connector_match[] = {
}, {
.compatible = "vga-connector",
.data = (void *)DRM_MODE_CONNECTOR_VGA,
+ }, {
+ .compatible = "dp-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_DisplayPort,
},
{},
};
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 0c79a9ba48bb..dda4fa9a1a08 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -3440,8 +3440,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
err_iahb:
clk_disable_unprepare(hdmi->iahb_clk);
- if (hdmi->cec_clk)
- clk_disable_unprepare(hdmi->cec_clk);
+ clk_disable_unprepare(hdmi->cec_clk);
err_isfr:
clk_disable_unprepare(hdmi->isfr_clk);
err_res:
@@ -3465,8 +3464,7 @@ void dw_hdmi_remove(struct dw_hdmi *hdmi)
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
- if (hdmi->cec_clk)
- clk_disable_unprepare(hdmi->cec_clk);
+ clk_disable_unprepare(hdmi->cec_clk);
if (hdmi->i2c)
i2c_del_adapter(&hdmi->i2c->adap);
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index 86b06975bfdd..e21078b2f8b5 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -202,7 +202,7 @@ static int thc63_probe(struct platform_device *pdev)
thc63->dev = &pdev->dev;
platform_set_drvdata(pdev, thc63);
- thc63->vcc = devm_regulator_get_optional(thc63->dev, "vcc");
+ thc63->vcc = devm_regulator_get(thc63->dev, "vcc");
if (IS_ERR(thc63->vcc)) {
if (PTR_ERR(thc63->vcc) == -EPROBE_DEFER)
return -EPROBE_DEFER;
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 4c7ad46fdd21..5311d03d49cc 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -45,13 +45,9 @@
#include "drm_legacy.h"
-/**
+/*
* Get AGP information.
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a (output) drm_agp_info structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been initialized and acquired and fills in the
@@ -92,7 +88,7 @@ int drm_agp_info_ioctl(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Acquire the AGP device.
*
* \param dev DRM device that is to acquire AGP.
@@ -103,11 +99,13 @@ int drm_agp_info_ioctl(struct drm_device *dev, void *data,
*/
int drm_agp_acquire(struct drm_device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
if (!dev->agp)
return -ENODEV;
if (dev->agp->acquired)
return -EBUSY;
- dev->agp->bridge = agp_backend_acquire(dev->pdev);
+ dev->agp->bridge = agp_backend_acquire(pdev);
if (!dev->agp->bridge)
return -ENODEV;
dev->agp->acquired = 1;
@@ -115,13 +113,9 @@ int drm_agp_acquire(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_agp_acquire);
-/**
+/*
* Acquire the AGP device (ioctl).
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device hasn't been acquired before and calls
@@ -133,7 +127,7 @@ int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
}
-/**
+/*
* Release the AGP device.
*
* \param dev DRM device that is to release AGP.
@@ -157,7 +151,7 @@ int drm_agp_release_ioctl(struct drm_device *dev, void *data,
return drm_agp_release(dev);
}
-/**
+/*
* Enable the AGP bus.
*
* \param dev DRM device that has previously acquired AGP.
@@ -187,13 +181,9 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
return drm_agp_enable(dev, *mode);
}
-/**
+/*
* Allocate AGP memory.
*
- * \param inode device inode.
- * \param file_priv file private pointer.
- * \param cmd command.
- * \param arg pointer to a drm_agp_buffer structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired, allocates the
@@ -242,7 +232,7 @@ int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
return drm_agp_alloc(dev, request);
}
-/**
+/*
* Search for the AGP memory entry associated with a handle.
*
* \param dev DRM device structure.
@@ -263,13 +253,9 @@ static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device *dev,
return NULL;
}
-/**
+/*
* Unbind AGP memory from the GATT (ioctl).
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_binding structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and acquired, looks-up the AGP memory
@@ -285,7 +271,7 @@ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
entry = drm_agp_lookup_entry(dev, request->handle);
if (!entry || !entry->bound)
return -EINVAL;
- ret = drm_unbind_agp(entry->memory);
+ ret = agp_unbind_memory(entry->memory);
if (ret == 0)
entry->bound = 0;
return ret;
@@ -301,13 +287,9 @@ int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
return drm_agp_unbind(dev, request);
}
-/**
+/*
* Bind AGP memory into the GATT (ioctl)
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_binding structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired and that no memory
@@ -326,7 +308,7 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
if (!entry || entry->bound)
return -EINVAL;
page = DIV_ROUND_UP(request->offset, PAGE_SIZE);
- retcode = drm_bind_agp(entry->memory, page);
+ retcode = agp_bind_memory(entry->memory, page);
if (retcode)
return retcode;
entry->bound = dev->agp->base + (page << PAGE_SHIFT);
@@ -345,13 +327,9 @@ int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
return drm_agp_bind(dev, request);
}
-/**
+/*
* Free AGP memory (ioctl).
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_buffer structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired and looks up the
@@ -369,11 +347,11 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
if (!entry)
return -EINVAL;
if (entry->bound)
- drm_unbind_agp(entry->memory);
+ agp_unbind_memory(entry->memory);
list_del(&entry->head);
- drm_free_agp(entry->memory, entry->pages);
+ agp_free_memory(entry->memory);
kfree(entry);
return 0;
}
@@ -388,7 +366,7 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data,
return drm_agp_free(dev, request);
}
-/**
+/*
* Initialize the AGP resources.
*
* \return pointer to a drm_agp_head structure.
@@ -402,14 +380,15 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data,
*/
struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_agp_head *head = NULL;
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
return NULL;
- head->bridge = agp_find_bridge(dev->pdev);
+ head->bridge = agp_find_bridge(pdev);
if (!head->bridge) {
- head->bridge = agp_backend_acquire(dev->pdev);
+ head->bridge = agp_backend_acquire(pdev);
if (!head->bridge) {
kfree(head);
return NULL;
@@ -453,8 +432,8 @@ void drm_legacy_agp_clear(struct drm_device *dev)
list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
if (entry->bound)
- drm_unbind_agp(entry->memory);
- drm_free_agp(entry->memory, entry->pages);
+ agp_unbind_memory(entry->memory);
+ agp_free_memory(entry->memory);
kfree(entry);
}
INIT_LIST_HEAD(&dev->agp->memory);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ba1507036f26..560aaecba31b 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2040,6 +2040,9 @@ crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
* should always call this function from their
* &drm_mode_config_funcs.atomic_commit hook.
*
+ * Drivers that need to extend the commit setup to private objects can use the
+ * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
+ *
* To be able to use this support drivers need to use a few more helper
* functions. drm_atomic_helper_wait_for_dependencies() must be called before
* actually committing the hardware state, and for nonblocking commits this call
@@ -2083,8 +2086,11 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_crtc_commit *commit;
+ const struct drm_mode_config_helper_funcs *funcs;
int i, ret;
+ funcs = state->dev->mode_config.helper_private;
+
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
commit = kzalloc(sizeof(*commit), GFP_KERNEL);
if (!commit)
@@ -2169,6 +2175,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
new_plane_state->commit = drm_crtc_commit_get(commit);
}
+ if (funcs && funcs->atomic_commit_setup)
+ return funcs->atomic_commit_setup(state);
+
return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
@@ -3021,7 +3030,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,
ret = handle_conflicting_encoders(state, true);
if (ret)
- return ret;
+ goto fail;
ret = drm_atomic_commit(state);
@@ -3500,76 +3509,6 @@ fail:
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
/**
- * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
- * @crtc: CRTC object
- * @red: red correction table
- * @green: green correction table
- * @blue: green correction table
- * @size: size of the tables
- * @ctx: lock acquire context
- *
- * Implements support for legacy gamma correction table for drivers
- * that support color management through the DEGAMMA_LUT/GAMMA_LUT
- * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
- * how the atomic color management and gamma tables work.
- */
-int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue,
- uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_atomic_state *state;
- struct drm_crtc_state *crtc_state;
- struct drm_property_blob *blob = NULL;
- struct drm_color_lut *blob_data;
- int i, ret = 0;
- bool replaced;
-
- state = drm_atomic_state_alloc(crtc->dev);
- if (!state)
- return -ENOMEM;
-
- blob = drm_property_create_blob(dev,
- sizeof(struct drm_color_lut) * size,
- NULL);
- if (IS_ERR(blob)) {
- ret = PTR_ERR(blob);
- blob = NULL;
- goto fail;
- }
-
- /* Prepare GAMMA_LUT with the legacy values. */
- blob_data = blob->data;
- for (i = 0; i < size; i++) {
- blob_data[i].red = red[i];
- blob_data[i].green = green[i];
- blob_data[i].blue = blue[i];
- }
-
- state->acquire_ctx = ctx;
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- goto fail;
- }
-
- /* Reset DEGAMMA_LUT and CTM properties. */
- replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
- replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
- replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
- crtc_state->color_mgmt_changed |= replaced;
-
- ret = drm_atomic_commit(state);
-
-fail:
- drm_atomic_state_put(state);
- drm_property_blob_put(blob);
- return ret;
-}
-EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
-
-/**
* drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
* the input end of a bridge
* @bridge: bridge control structure
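The drm_atomic_helper.c hunks above document and wire up the new &drm_mode_config_helper_funcs.atomic_commit_setup hook, called from drm_atomic_helper_setup_commit(). A hedged sketch of how a driver might use it for private-object bookkeeping (my_commit_setup and my_mode_config_helpers are assumptions, not part of this patch):

#include <drm/drm_atomic.h>
#include <drm/drm_modeset_helper_vtables.h>

/* Called from drm_atomic_helper_setup_commit() for every commit. */
static int my_commit_setup(struct drm_atomic_state *state)
{
	/*
	 * Driver-specific setup for private objects, e.g. queuing
	 * per-commit work or taking extra references, goes here.
	 */
	return 0;
}

static const struct drm_mode_config_helper_funcs my_mode_config_helpers = {
	.atomic_commit_setup = my_commit_setup,
};

/* at init time, after drm_mode_config_init(): */
/* drm->mode_config.helper_private = &my_mode_config_helpers; */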
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 5c2141e9a9f4..26e2f2ffd255 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -185,12 +185,6 @@
* plane does not expose the "alpha" property, then this is
* assumed to be 1.0
*
- * IN_FORMATS:
- * Blob property which contains the set of buffer format and modifier
- * pairs supported by this plane. The blob is a drm_format_modifier_blob
- * struct. Without this property the plane doesn't support buffers with
- * modifiers. Userspace cannot change this property.
- *
* Note that all the property extensions described here apply either to the
* plane or the CRTC (e.g. for the background color, which currently is not
* exposed and assumed to be black).
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index aeb1327e3077..e3d77dfefb0a 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
* As we're limiting the address to 2^32-1 (or less),
* casting it down to 32 bits is no problem, but we
* need to point to a 64bit variable first. */
- map->handle = dma_alloc_coherent(&dev->pdev->dev,
+ map->handle = dma_alloc_coherent(dev->dev,
map->size,
&map->offset,
GFP_KERNEL);
@@ -556,7 +556,7 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
- dma_free_coherent(&dev->pdev->dev,
+ dma_free_coherent(dev->dev,
map->size,
map->handle,
map->offset);
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 0fe3c496002a..79a50ef1250f 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -30,6 +30,8 @@
#include <linux/export.h>
#include <linux/highmem.h>
+#include <linux/mem_encrypt.h>
+#include <xen/xen.h>
#include <drm/drm_cache.h>
@@ -176,3 +178,34 @@ drm_clflush_virt_range(void *addr, unsigned long length)
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);
+
+bool drm_need_swiotlb(int dma_bits)
+{
+ struct resource *tmp;
+ resource_size_t max_iomem = 0;
+
+ /*
+ * Xen paravirtual hosts require swiotlb regardless of requested dma
+ * transfer size.
+ *
+ * NOTE: Really, what it requires is use of the dma_alloc_coherent
+ * allocator used in ttm_dma_populate() instead of
+ * ttm_populate_and_map_pages(), which bounce buffers so much in
+ * Xen it leads to swiotlb buffer exhaustion.
+ */
+ if (xen_pv_domain())
+ return true;
+
+ /*
+ * Enforce dma_alloc_coherent when memory encryption is active as well
+ * for the same reasons as for Xen paravirtual hosts.
+ */
+ if (mem_encrypt_active())
+ return true;
+
+ for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
+ max_iomem = max(max_iomem, tmp->end);
+
+ return max_iomem > ((u64)1 << dma_bits);
+}
+EXPORT_SYMBOL(drm_need_swiotlb);
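drm_need_swiotlb() above is intended for TTM-based drivers deciding whether DMA must go through coherent allocations. A hedged usage sketch; the 44-bit addressing limit is only an example value:

#include <drm/drm_cache.h>

static bool my_driver_needs_coherent_dma(void)
{
	/*
	 * True for Xen PV domains, active memory encryption, or when
	 * I/O memory exists above the device's 44-bit DMA limit.
	 */
	return drm_need_swiotlb(44);
}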
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 3bcabc2f6e0e..bb14f488c8f6 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -22,6 +22,7 @@
#include <linux/uaccess.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
@@ -89,9 +90,8 @@
* modes) appropriately.
*
* There is also support for a legacy gamma table, which is set up by calling
- * drm_mode_crtc_set_gamma_size(). Drivers which support both should use
- * drm_atomic_helper_legacy_gamma_set() to alias the legacy gamma ramp with the
- * "GAMMA_LUT" property above.
+ * drm_mode_crtc_set_gamma_size(). The DRM core will then alias the legacy gamma
+ * ramp with "GAMMA_LUT" or, if that is unavailable, "DEGAMMA_LUT".
*
* Support for different non RGB color encodings is controlled through
* &drm_plane specific COLOR_ENCODING and COLOR_RANGE properties. They
@@ -156,9 +156,6 @@ EXPORT_SYMBOL(drm_color_ctm_s31_32_to_qm_n);
* optional. The gamma and degamma properties are only attached if
* their size is not 0 and ctm_property is only attached if has_ctm is
* true.
- *
- * Drivers should use drm_atomic_helper_legacy_gamma_set() to implement the
- * legacy &drm_crtc_funcs.gamma_set callback.
*/
void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
uint degamma_lut_size,
@@ -232,6 +229,116 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
/**
+ * drm_crtc_supports_legacy_gamma - does the crtc support legacy gamma correction table
+ * @crtc: CRTC object
+ *
+ * Returns true if the given crtc supports setting the legacy gamma
+ * correction table, false otherwise.
+ */
+static bool drm_crtc_supports_legacy_gamma(struct drm_crtc *crtc)
+{
+ u32 gamma_id = crtc->dev->mode_config.gamma_lut_property->base.id;
+ u32 degamma_id = crtc->dev->mode_config.degamma_lut_property->base.id;
+
+ if (!crtc->gamma_size)
+ return false;
+
+ if (crtc->funcs->gamma_set)
+ return true;
+
+ return !!(drm_mode_obj_find_prop_id(&crtc->base, gamma_id) ||
+ drm_mode_obj_find_prop_id(&crtc->base, degamma_id));
+}
+
+/**
+ * drm_crtc_legacy_gamma_set - set the legacy gamma correction table
+ * @crtc: CRTC object
+ * @red: red correction table
+ * @green: green correction table
+ * @blue: blue correction table
+ * @size: size of the tables
+ * @ctx: lock acquire context
+ *
+ * Implements support for legacy gamma correction table for drivers
+ * that have set drm_crtc_funcs.gamma_set or that support color management
+ * through the DEGAMMA_LUT/GAMMA_LUT properties. See
+ * drm_crtc_enable_color_mgmt() and the containing chapter for
+ * how the atomic color management and gamma tables work.
+ *
+ * This function sets the gamma using drm_crtc_funcs.gamma_set if set, or
+ * alternatively using crtc color management properties.
+ */
+static int drm_crtc_legacy_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ u32 size,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 gamma_id = dev->mode_config.gamma_lut_property->base.id;
+ u32 degamma_id = dev->mode_config.degamma_lut_property->base.id;
+ bool use_gamma_lut;
+ int i, ret = 0;
+ bool replaced;
+
+ if (crtc->funcs->gamma_set)
+ return crtc->funcs->gamma_set(crtc, red, green, blue, size, ctx);
+
+ if (drm_mode_obj_find_prop_id(&crtc->base, gamma_id))
+ use_gamma_lut = true;
+ else if (drm_mode_obj_find_prop_id(&crtc->base, degamma_id))
+ use_gamma_lut = false;
+ else
+ return -ENODEV;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ blob = drm_property_create_blob(dev,
+ sizeof(struct drm_color_lut) * size,
+ NULL);
+ if (IS_ERR(blob)) {
+ ret = PTR_ERR(blob);
+ blob = NULL;
+ goto fail;
+ }
+
+ /* Prepare GAMMA_LUT with the legacy values. */
+ blob_data = blob->data;
+ for (i = 0; i < size; i++) {
+ blob_data[i].red = red[i];
+ blob_data[i].green = green[i];
+ blob_data[i].blue = blue[i];
+ }
+
+ state->acquire_ctx = ctx;
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ /* Set GAMMA_LUT and reset DEGAMMA_LUT and CTM */
+ replaced = drm_property_replace_blob(&crtc_state->degamma_lut,
+ use_gamma_lut ? NULL : blob);
+ replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
+ replaced |= drm_property_replace_blob(&crtc_state->gamma_lut,
+ use_gamma_lut ? blob : NULL);
+ crtc_state->color_mgmt_changed |= replaced;
+
+ ret = drm_atomic_commit(state);
+
+fail:
+ drm_atomic_state_put(state);
+ drm_property_blob_put(blob);
+ return ret;
+}
+
+/**
* drm_mode_gamma_set_ioctl - set the gamma table
* @dev: DRM device
* @data: ioctl data
@@ -262,7 +369,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
if (!crtc)
return -ENOENT;
- if (crtc->funcs->gamma_set == NULL)
+ if (!drm_crtc_supports_legacy_gamma(crtc))
return -ENOSYS;
/* memcpy into gamma store */
@@ -290,8 +397,8 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
goto out;
}
- ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base,
- crtc->gamma_size, &ctx);
+ ret = drm_crtc_legacy_gamma_set(crtc, r_base, g_base, b_base,
+ crtc->gamma_size, &ctx);
out:
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
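With the ioctl change above, an atomic driver no longer needs a &drm_crtc_funcs.gamma_set hook for the legacy gamma path; exposing GAMMA_LUT (or DEGAMMA_LUT) plus a gamma size is enough. A hedged sketch of the CRTC-init side, assuming a 256-entry LUT and no CTM (the sizes are illustrative):

#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>

static int my_crtc_color_init(struct drm_crtc *crtc)
{
	/* expose a 256-entry GAMMA_LUT, no degamma LUT, no CTM */
	drm_crtc_enable_color_mgmt(crtc, 0, false, 256);

	/* size the legacy gamma ramp so the ioctl path above can alias it */
	return drm_mode_crtc_set_gamma_size(crtc, 256);
}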
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 74090fc3aa55..9c4f9947b194 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -38,6 +38,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_atomic.h>
#include <drm/drm_auth.h>
@@ -67,7 +68,7 @@
* &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2, and other legacy
* operations like &drm_crtc_funcs.gamma_set. For atomic drivers all these
* features are controlled through &drm_property and
- * &drm_mode_config_funcs.atomic_check and &drm_mode_config_funcs.atomic_check.
+ * &drm_mode_config_funcs.atomic_check.
*/
/**
@@ -240,30 +241,12 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
* Nearest Neighbor scaling filter
*/
-/**
- * drm_crtc_init_with_planes - Initialise a new CRTC object with
- * specified primary and cursor planes.
- * @dev: DRM device
- * @crtc: CRTC object to init
- * @primary: Primary plane for CRTC
- * @cursor: Cursor plane for CRTC
- * @funcs: callbacks for the new CRTC
- * @name: printf style format string for the CRTC name, or NULL for default name
- *
- * Inits a new object created as base part of a driver crtc object. Drivers
- * should use this function instead of drm_crtc_init(), which is only provided
- * for backwards compatibility with drivers which do not yet support universal
- * planes). For really simple hardware which has only 1 plane look at
- * drm_simple_display_pipe_init() instead.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_plane *primary,
- struct drm_plane *cursor,
- const struct drm_crtc_funcs *funcs,
- const char *name, ...)
+__printf(6, 0)
+static int __drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, va_list ap)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
@@ -291,11 +274,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
return ret;
if (name) {
- va_list ap;
-
- va_start(ap, name);
crtc->name = kvasprintf(GFP_KERNEL, name, ap);
- va_end(ap);
} else {
crtc->name = kasprintf(GFP_KERNEL, "crtc-%d",
drm_num_crtcs(dev));
@@ -339,8 +318,101 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
return 0;
}
+
+/**
+ * drm_crtc_init_with_planes - Initialise a new CRTC object with
+ * specified primary and cursor planes.
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @primary: Primary plane for CRTC
+ * @cursor: Cursor plane for CRTC
+ * @funcs: callbacks for the new CRTC
+ * @name: printf style format string for the CRTC name, or NULL for default name
+ *
+ * Inits a new object created as base part of a driver crtc object. Drivers
+ * should use this function instead of drm_crtc_init(), which is only provided
+ * for backwards compatibility with drivers which do not yet support universal
+ * planes. For really simple hardware which has only one plane, look at
+ * drm_simple_display_pipe_init() instead.
+ * The &drm_crtc_funcs.destroy hook should call drm_crtc_cleanup() and kfree()
+ * the crtc structure. The crtc structure should not be allocated with
+ * devm_kzalloc().
+ *
+ * The @primary and @cursor planes are only relevant for legacy uAPI, see
+ * &drm_crtc.primary and &drm_crtc.cursor.
+ *
+ * Note: consider using drmm_crtc_alloc_with_planes() instead of
+ * drm_crtc_init_with_planes() to let the DRM managed resource infrastructure
+ * take care of cleanup and deallocation.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...)
+{
+ va_list ap;
+ int ret;
+
+ WARN_ON(!funcs->destroy);
+
+ va_start(ap, name);
+ ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
+ name, ap);
+ va_end(ap);
+
+ return ret;
+}
EXPORT_SYMBOL(drm_crtc_init_with_planes);
+static void drmm_crtc_alloc_with_planes_cleanup(struct drm_device *dev,
+ void *ptr)
+{
+ struct drm_crtc *crtc = ptr;
+
+ drm_crtc_cleanup(crtc);
+}
+
+void *__drmm_crtc_alloc_with_planes(struct drm_device *dev,
+ size_t size, size_t offset,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...)
+{
+ void *container;
+ struct drm_crtc *crtc;
+ va_list ap;
+ int ret;
+
+ if (WARN_ON(!funcs || funcs->destroy))
+ return ERR_PTR(-EINVAL);
+
+ container = drmm_kzalloc(dev, size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = container + offset;
+
+ va_start(ap, name);
+ ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
+ name, ap);
+ va_end(ap);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drmm_add_action_or_reset(dev, drmm_crtc_alloc_with_planes_cleanup,
+ crtc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return container;
+}
+EXPORT_SYMBOL(__drmm_crtc_alloc_with_planes);
+
/**
* drm_crtc_cleanup - Clean up the core crtc usage
* @crtc: CRTC to cleanup
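__drmm_crtc_alloc_with_planes() above backs the drmm_crtc_alloc_with_planes() macro added to <drm/drm_crtc.h> in the same series. A hedged sketch of a driver using it (struct my_crtc, my_crtc_funcs and the plane pointers are assumptions); note that the funcs must not provide .destroy, since cleanup is handled by the managed release action:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_managed.h>

struct my_crtc {
	struct drm_crtc base;
	/* driver-private CRTC state would follow */
};

static const struct drm_crtc_funcs my_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	/* no .destroy: drmm_crtc_alloc_with_planes() registers cleanup */
};

static struct my_crtc *my_crtc_create(struct drm_device *drm,
				      struct drm_plane *primary,
				      struct drm_plane *cursor)
{
	return drmm_crtc_alloc_with_planes(drm, struct my_crtc, base,
					   primary, cursor,
					   &my_crtc_funcs, NULL);
}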
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 5bd0934004e3..eedbb48815b7 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -950,6 +950,38 @@ bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE]
EXPORT_SYMBOL(drm_dp_downstream_444_to_420_conversion);
/**
+ * drm_dp_downstream_rgb_to_ycbcr_conversion() - determine downstream facing port
+ * RGB->YCbCr conversion capability
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: downstream facing port capabilities
+ * @color_spc: Colorspace for which conversion cap is sought
+ *
+ * Returns: whether the downstream facing port can convert RGB->YCbCr for a given
+ * colorspace.
+ */
+bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ u8 color_spc)
+{
+ if (!drm_dp_is_branch(dpcd))
+ return false;
+
+ if (dpcd[DP_DPCD_REV] < 0x13)
+ return false;
+
+ switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+ case DP_DS_PORT_TYPE_HDMI:
+ if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+ return false;
+
+ return port_cap[3] & color_spc;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(drm_dp_downstream_rgb_to_ycbcr_conversion);
+
+/**
* drm_dp_downstream_mode() - return a mode for downstream facing port
* @dev: DRM device
* @dpcd: DisplayPort configuration data
@@ -1204,7 +1236,7 @@ bool drm_dp_read_sink_count_cap(struct drm_connector *connector,
return connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
dpcd[DP_DPCD_REV] >= DP_DPCD_REV_11 &&
dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
- !drm_dp_has_quirk(desc, 0, DP_DPCD_QUIRK_NO_SINK_COUNT);
+ !drm_dp_has_quirk(desc, DP_DPCD_QUIRK_NO_SINK_COUNT);
}
EXPORT_SYMBOL(drm_dp_read_sink_count_cap);
@@ -1925,87 +1957,6 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
#undef DEVICE_ID_ANY
#undef DEVICE_ID
-struct edid_quirk {
- u8 mfg_id[2];
- u8 prod_id[2];
- u32 quirks;
-};
-
-#define MFG(first, second) { (first), (second) }
-#define PROD_ID(first, second) { (first), (second) }
-
-/*
- * Some devices have unreliable OUIDs where they don't set the device ID
- * correctly, and as a result we need to use the EDID for finding additional
- * DP quirks in such cases.
- */
-static const struct edid_quirk edid_quirk_list[] = {
- /* Optional 4K AMOLED panel in the ThinkPad X1 Extreme 2nd Generation
- * only supports DPCD backlight controls
- */
- { MFG(0x4c, 0x83), PROD_ID(0x41, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- /*
- * Some Dell CML 2020 systems have panels support both AUX and PWM
- * backlight control, and some only support AUX backlight control. All
- * said panels start up in AUX mode by default, and we don't have any
- * support for disabling HDR mode on these panels which would be
- * required to switch to PWM backlight control mode (plus, I'm not
- * even sure we want PWM backlight controls over DPCD backlight
- * controls anyway...). Until we have a better way of detecting these,
- * force DPCD backlight mode on all of them.
- */
- { MFG(0x06, 0xaf), PROD_ID(0x9b, 0x32), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- { MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- { MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- { MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- { MFG(0x4c, 0x83), PROD_ID(0x47, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- { MFG(0x09, 0xe5), PROD_ID(0xde, 0x08), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
-};
-
-#undef MFG
-#undef PROD_ID
-
-/**
- * drm_dp_get_edid_quirks() - Check the EDID of a DP device to find additional
- * DP-specific quirks
- * @edid: The EDID to check
- *
- * While OUIDs are meant to be used to recognize a DisplayPort device, a lot
- * of manufacturers don't seem to like following standards and neglect to fill
- * the dev-ID in, making it impossible to only use OUIDs for determining
- * quirks in some cases. This function can be used to check the EDID and look
- * up any additional DP quirks. The bits returned by this function correspond
- * to the quirk bits in &drm_dp_quirk.
- *
- * Returns: a bitmask of quirks, if any. The driver can check this using
- * drm_dp_has_quirk().
- */
-u32 drm_dp_get_edid_quirks(const struct edid *edid)
-{
- const struct edid_quirk *quirk;
- u32 quirks = 0;
- int i;
-
- if (!edid)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
- quirk = &edid_quirk_list[i];
- if (memcmp(quirk->mfg_id, edid->mfg_id,
- sizeof(edid->mfg_id)) == 0 &&
- memcmp(quirk->prod_id, edid->prod_code,
- sizeof(edid->prod_code)) == 0)
- quirks |= quirk->quirks;
- }
-
- DRM_DEBUG_KMS("DP sink: EDID mfg %*phD prod-ID %*phD quirks: 0x%04x\n",
- (int)sizeof(edid->mfg_id), edid->mfg_id,
- (int)sizeof(edid->prod_code), edid->prod_code, quirks);
-
- return quirks;
-}
-EXPORT_SYMBOL(drm_dp_get_edid_quirks);
-
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
@@ -2596,3 +2547,538 @@ void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
#undef DP_SDP_LOG
}
EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
+
+/**
+ * drm_dp_get_pcon_max_frl_bw() - maximum FRL bandwidth supported by PCON
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns the maximum FRL bandwidth supported by the PCON in Gbps,
+ * or 0 if not supported.
+ */
+int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4])
+{
+ int bw;
+ u8 buf;
+
+ buf = port_cap[2];
+ bw = buf & DP_PCON_MAX_FRL_BW;
+
+ switch (bw) {
+ case DP_PCON_MAX_9GBPS:
+ return 9;
+ case DP_PCON_MAX_18GBPS:
+ return 18;
+ case DP_PCON_MAX_24GBPS:
+ return 24;
+ case DP_PCON_MAX_32GBPS:
+ return 32;
+ case DP_PCON_MAX_40GBPS:
+ return 40;
+ case DP_PCON_MAX_48GBPS:
+ return 48;
+ case DP_PCON_MAX_0GBPS:
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_get_pcon_max_frl_bw);
+
+/**
+ * drm_dp_pcon_frl_prepare() - Prepare PCON for FRL.
+ * @aux: DisplayPort AUX channel
+ * @enable_frl_ready_hpd: Configure DP_PCON_ENABLE_HPD_READY.
+ *
+ * Returns 0 if success, else returns negative error code.
+ */
+int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd)
+{
+ int ret;
+ u8 buf = DP_PCON_ENABLE_SOURCE_CTL_MODE |
+ DP_PCON_ENABLE_LINK_FRL_MODE;
+
+ if (enable_frl_ready_hpd)
+ buf |= DP_PCON_ENABLE_HPD_READY;
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_prepare);
+
+/**
+ * drm_dp_pcon_is_frl_ready() - Is PCON ready for FRL
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns true if the PCON is ready for FRL, else returns false.
+ */
+bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 buf;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ if (ret < 0)
+ return false;
+
+ if (buf & DP_PCON_FRL_READY)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(drm_dp_pcon_is_frl_ready);
+
+/**
+ * drm_dp_pcon_frl_configure_1() - Set HDMI link configuration, step 1
+ * @aux: DisplayPort AUX channel
+ * @max_frl_gbps: maximum FRL bandwidth to be configured between PCON and HDMI sink
+ * @concurrent_mode: true if concurrent mode of operation is required,
+ * false otherwise.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+
+int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
+ bool concurrent_mode)
+{
+ int ret;
+ u8 buf;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (concurrent_mode)
+ buf |= DP_PCON_ENABLE_CONCURRENT_LINK;
+ else
+ buf &= ~DP_PCON_ENABLE_CONCURRENT_LINK;
+
+ switch (max_frl_gbps) {
+ case 9:
+ buf |= DP_PCON_ENABLE_MAX_BW_9GBPS;
+ break;
+ case 18:
+ buf |= DP_PCON_ENABLE_MAX_BW_18GBPS;
+ break;
+ case 24:
+ buf |= DP_PCON_ENABLE_MAX_BW_24GBPS;
+ break;
+ case 32:
+ buf |= DP_PCON_ENABLE_MAX_BW_32GBPS;
+ break;
+ case 40:
+ buf |= DP_PCON_ENABLE_MAX_BW_40GBPS;
+ break;
+ case 48:
+ buf |= DP_PCON_ENABLE_MAX_BW_48GBPS;
+ break;
+ case 0:
+ buf |= DP_PCON_ENABLE_MAX_BW_0GBPS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_configure_1);
+
+/**
+ * drm_dp_pcon_frl_configure_2() - Set HDMI Link configuration Step-2
+ * @aux: DisplayPort AUX channel
+ * @max_frl_mask: max FRL BW to be tried by the PCON with the HDMI sink
+ * @extended_train_mode: true for Extended Mode, false for Normal Mode.
+ * In Normal mode, the PCON tries each FRL BW from the max_frl_mask starting
+ * from the minimum, and stops when link training is successful. In Extended
+ * mode, all FRL BWs selected in the mask are trained by the PCON.
+ *
+ * Returns 0 if success, else returns negative error code.
+ */
+int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
+ bool extended_train_mode)
+{
+ int ret;
+ u8 buf = max_frl_mask;
+
+ if (extended_train_mode)
+ buf |= DP_PCON_FRL_LINK_TRAIN_EXTENDED;
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_configure_2);
+
+/**
+ * drm_dp_pcon_reset_frl_config() - Reset HDMI link configuration.
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 if success, else returns negative error code.
+ */
+int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_reset_frl_config);
+
+/**
+ * drm_dp_pcon_frl_enable() - Enable HDMI link through FRL
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 if success, else returns negative error code.
+ */
+int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 buf = 0;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ if (ret < 0)
+ return ret;
+ if (!(buf & DP_PCON_ENABLE_SOURCE_CTL_MODE)) {
+ DRM_DEBUG_KMS("PCON in Autonomous mode, can't enable FRL\n");
+ return -EINVAL;
+ }
+ buf |= DP_PCON_ENABLE_HDMI_LINK;
+ ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_enable);
+
+/**
+ * drm_dp_pcon_hdmi_link_active() - check if the PCON HDMI LINK status is active.
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns true if link is active else returns false.
+ */
+bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux)
+{
+ u8 buf;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ if (ret < 0)
+ return false;
+
+ return buf & DP_PCON_HDMI_TX_LINK_ACTIVE;
+}
+EXPORT_SYMBOL(drm_dp_pcon_hdmi_link_active);
+
+/**
+ * drm_dp_pcon_hdmi_link_mode() - get the PCON HDMI LINK MODE
+ * @aux: DisplayPort AUX channel
+ * @frl_trained_mask: pointer to store bitmask of the trained bw configuration.
+ * Valid only if the MODE returned is FRL. For Normal Link training mode
+ * only one of the bits will be set, but in Extended mode more than
+ * one bit can be set.
+ *
+ * Returns the link mode (TMDS or FRL) on success, else returns negative error
+ * code.
+ */
+int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask)
+{
+ u8 buf;
+ int mode;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
+ if (ret < 0)
+ return ret;
+
+ mode = buf & DP_PCON_HDMI_LINK_MODE;
+
+ if (frl_trained_mask && DP_PCON_HDMI_MODE_FRL == mode)
+ *frl_trained_mask = (buf & DP_PCON_HDMI_FRL_TRAINED_BW) >> 1;
+
+ return mode;
+}
+EXPORT_SYMBOL(drm_dp_pcon_hdmi_link_mode);
+
+/**
+ * drm_dp_pcon_hdmi_frl_link_error_count() - print the error count per lane
+ * during link failure between PCON and HDMI sink
+ * @aux: DisplayPort AUX channel
+ * @connector: DRM connector
+ */
+
+void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
+ struct drm_connector *connector)
+{
+ u8 buf, error_count;
+ int i, num_error;
+ struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
+
+ for (i = 0; i < hdmi->max_lanes; i++) {
+ if (drm_dp_dpcd_readb(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
+ return;
+
+ error_count = buf & DP_PCON_HDMI_ERROR_COUNT_MASK;
+ switch (error_count) {
+ case DP_PCON_HDMI_ERROR_COUNT_HUNDRED_PLUS:
+ num_error = 100;
+ break;
+ case DP_PCON_HDMI_ERROR_COUNT_TEN_PLUS:
+ num_error = 10;
+ break;
+ case DP_PCON_HDMI_ERROR_COUNT_THREE_PLUS:
+ num_error = 3;
+ break;
+ default:
+ num_error = 0;
+ }
+
+ DRM_ERROR("More than %d errors since the last read for lane %d\n", num_error, i);
+ }
+}
+EXPORT_SYMBOL(drm_dp_pcon_hdmi_frl_link_error_count);
+
+/*
+ * drm_dp_pcon_enc_is_dsc_1_2 - Does the PCON encoder support DSC 1.2
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
+ *
+ * Returns true if the PCON encoder supports DSC 1.2, else returns false.
+ */
+bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+ u8 buf;
+ u8 major_v, minor_v;
+
+ buf = pcon_dsc_dpcd[DP_PCON_DSC_VERSION - DP_PCON_DSC_ENCODER];
+ major_v = (buf & DP_PCON_DSC_MAJOR_MASK) >> DP_PCON_DSC_MAJOR_SHIFT;
+ minor_v = (buf & DP_PCON_DSC_MINOR_MASK) >> DP_PCON_DSC_MINOR_SHIFT;
+
+ if (major_v == 1 && minor_v == 2)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(drm_dp_pcon_enc_is_dsc_1_2);
+
+/*
+ * drm_dp_pcon_dsc_max_slices - Get max slices supported by PCON DSC Encoder
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
+ *
+ * Returns maximum no. of slices supported by the PCON DSC Encoder.
+ */
+int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+ u8 slice_cap1, slice_cap2;
+
+ slice_cap1 = pcon_dsc_dpcd[DP_PCON_DSC_SLICE_CAP_1 - DP_PCON_DSC_ENCODER];
+ slice_cap2 = pcon_dsc_dpcd[DP_PCON_DSC_SLICE_CAP_2 - DP_PCON_DSC_ENCODER];
+
+ if (slice_cap2 & DP_PCON_DSC_24_PER_DSC_ENC)
+ return 24;
+ if (slice_cap2 & DP_PCON_DSC_20_PER_DSC_ENC)
+ return 20;
+ if (slice_cap2 & DP_PCON_DSC_16_PER_DSC_ENC)
+ return 16;
+ if (slice_cap1 & DP_PCON_DSC_12_PER_DSC_ENC)
+ return 12;
+ if (slice_cap1 & DP_PCON_DSC_10_PER_DSC_ENC)
+ return 10;
+ if (slice_cap1 & DP_PCON_DSC_8_PER_DSC_ENC)
+ return 8;
+ if (slice_cap1 & DP_PCON_DSC_6_PER_DSC_ENC)
+ return 6;
+ if (slice_cap1 & DP_PCON_DSC_4_PER_DSC_ENC)
+ return 4;
+ if (slice_cap1 & DP_PCON_DSC_2_PER_DSC_ENC)
+ return 2;
+ if (slice_cap1 & DP_PCON_DSC_1_PER_DSC_ENC)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_dsc_max_slices);
+
+/*
+ * drm_dp_pcon_dsc_max_slice_width() - Get max slice width for PCON DSC encoder
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
+ *
+ * Returns the maximum slice width in pixels, i.e. the DPCD value multiplied by 320.
+ */
+int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+ u8 buf;
+
+ buf = pcon_dsc_dpcd[DP_PCON_DSC_MAX_SLICE_WIDTH - DP_PCON_DSC_ENCODER];
+
+ return buf * DP_DSC_SLICE_WIDTH_MULTIPLIER;
+}
+EXPORT_SYMBOL(drm_dp_pcon_dsc_max_slice_width);
+
+/*
+ * drm_dp_pcon_dsc_bpp_incr() - Get bits per pixel increment for PCON DSC encoder
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
+ *
+ * Returns the bpp precision supported by the PCON encoder.
+ */
+int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+ u8 buf;
+
+ buf = pcon_dsc_dpcd[DP_PCON_DSC_BPP_INCR - DP_PCON_DSC_ENCODER];
+
+ switch (buf & DP_PCON_DSC_BPP_INCR_MASK) {
+ case DP_PCON_DSC_ONE_16TH_BPP:
+ return 16;
+ case DP_PCON_DSC_ONE_8TH_BPP:
+ return 8;
+ case DP_PCON_DSC_ONE_4TH_BPP:
+ return 4;
+ case DP_PCON_DSC_ONE_HALF_BPP:
+ return 2;
+ case DP_PCON_DSC_ONE_BPP:
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_dsc_bpp_incr);
+
+static
+int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
+{
+ u8 buf;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ if (ret < 0)
+ return ret;
+
+ buf |= DP_PCON_ENABLE_DSC_ENCODER;
+
+ if (pps_buf_config <= DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER) {
+ buf &= ~DP_PCON_ENCODER_PPS_OVERRIDE_MASK;
+ buf |= pps_buf_config << 2;
+ }
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * drm_dp_pcon_pps_default() - Let the PCON fill the default PPS parameters
+ * for DSC 1.2 between the PCON and the HDMI 2.1 sink
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_pps_default(struct drm_dp_aux *aux)
+{
+ int ret;
+
+ ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_pps_default);
+
+/**
+ * drm_dp_pcon_pps_override_buf() - Configure PPS encoder override buffer for
+ * HDMI sink
+ * @aux: DisplayPort AUX channel
+ * @pps_buf: 128 bytes to be written into PPS buffer for HDMI sink by PCON.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128])
+{
+ int ret;
+
+ ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, pps_buf, 128);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_pps_override_buf);
+
+/*
+ * drm_dp_pcon_pps_override_param() - Write PPS parameters to DSC encoder
+ * override registers
+ * @aux: DisplayPort AUX channel
+ * @pps_param: 3 parameters (2 bytes each): slice height, slice width,
+ * bits_per_pixel, in that order.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6])
+{
+ int ret;
+
+ ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_pps_override_param);
+
+/*
+ * drm_dp_pcon_convert_rgb_to_ycbcr() - Configure the PCON to convert RGB to YCbCr
+ * @aux: DisplayPort AUX channel
+ * @color_spc: colorspace(s) for which conversion is to be enabled, 0 to disable.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
+{
+ int ret;
+ u8 buf;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (color_spc & DP_CONVERSION_RGB_YCBCR_MASK)
+ buf |= (color_spc & DP_CONVERSION_RGB_YCBCR_MASK);
+ else
+ buf &= ~DP_CONVERSION_RGB_YCBCR_MASK;
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr);
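The PCON helpers added above are meant to be called in sequence when a source driver brings up the DP-to-HDMI 2.1 FRL link. A hedged sketch of that flow (my_pcon_start_frl is an assumption; the caller supplies the DPCD, the downstream-port capability bytes and the FRL bandwidth mask, and error handling is trimmed to the essentials):

#include <drm/drm_dp_helper.h>

static int my_pcon_start_frl(struct drm_dp_aux *aux,
			     const u8 dpcd[DP_RECEIVER_CAP_SIZE],
			     const u8 port_cap[4], u8 frl_bw_mask)
{
	int max_bw, ret;

	max_bw = drm_dp_get_pcon_max_frl_bw(dpcd, port_cap);
	if (!max_bw)
		return -ENODEV; /* PCON does not support FRL */

	ret = drm_dp_pcon_frl_prepare(aux, false);
	if (ret < 0)
		return ret;

	if (!drm_dp_pcon_is_frl_ready(aux))
		return -EAGAIN;

	/* sequential (non-concurrent) DP and FRL operation */
	ret = drm_dp_pcon_frl_configure_1(aux, max_bw, false);
	if (ret < 0)
		return ret;

	/* Normal mode: the PCON stops at the first rate that trains */
	ret = drm_dp_pcon_frl_configure_2(aux, frl_bw_mask, false);
	if (ret < 0)
		return ret;

	return drm_dp_pcon_frl_enable(aux);
}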
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 0401b2f47500..e82b596d646c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2751,7 +2751,7 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
drm_dp_mst_topology_put_mstb(mstb);
mutex_unlock(&mgr->probe_lock);
- if (ret)
+ if (ret > 0)
drm_kms_helper_hotplug_event(dev);
}
@@ -3629,14 +3629,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}
-static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
+/**
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
+ * @link_rate: link rate in 10kbits/s units
+ * @link_lane_count: lane count
+ *
+ * Calculate the total bandwidth of a MultiStream Transport link. The returned
+ * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
+ * convert the number of PBNs required for a given stream to the number of
+ * timeslots this stream requires in each MTP.
+ */
+int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
{
- if (dp_link_bw == 0 || dp_link_count == 0)
- DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
- dp_link_bw, dp_link_count);
+ if (link_rate == 0 || link_lane_count == 0)
+ DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n",
+ link_rate, link_lane_count);
- return dp_link_bw * dp_link_count / 2;
+ /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
+ return link_rate * link_lane_count / 54000;
}
+EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
/**
* drm_dp_read_mst_cap() - check whether or not a sink supports MST
@@ -3692,7 +3704,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
}
- mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
if (mgr->pbn_div == 0) {
ret = -EINVAL;
@@ -5824,8 +5836,7 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_read_desc(port->mgr->aux, &desc, true))
return NULL;
- if (drm_dp_has_quirk(&desc, 0,
- DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+ if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
port->parent == port->mgr->mst_primary) {
u8 downstreamport;
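Since drm_dp_get_vc_payload_bw() is now exported and takes the link rate in 10 kbit/s units, a driver can reuse it outside the MST manager. A hedged sketch of converting a stream's PBN value into MTP time slots (my_stream_time_slots is an assumption; pbn would come from drm_dp_calc_pbn_mode()):

#include <linux/kernel.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_dp_mst_helper.h>

static int my_stream_time_slots(struct drm_dp_mst_topology_mgr *mgr, int pbn)
{
	int pbn_div;

	/* dpcd[1] holds the link bandwidth code, dpcd[2] the lane count */
	pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
					   mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
	if (!pbn_div)
		return -EINVAL;

	return DIV_ROUND_UP(pbn, pbn_div);
}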
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 734303802bc3..20d22e41d7ce 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -469,6 +469,9 @@ void drm_dev_unplug(struct drm_device *dev)
synchronize_srcu(&drm_unplug_srcu);
drm_dev_unregister(dev);
+
+ /* Clear all CPU mappings pointing to this device */
+ unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
}
EXPORT_SYMBOL(drm_dev_unplug);
@@ -589,11 +592,7 @@ static int drm_dev_init(struct drm_device *dev,
kref_init(&dev->ref);
dev->dev = get_device(parent);
-#ifdef CONFIG_DRM_LEGACY
- dev->driver = (struct drm_driver *)driver;
-#else
dev->driver = driver;
-#endif
INIT_LIST_HEAD(&dev->managed.resources);
spin_lock_init(&dev->managed.lock);
@@ -675,11 +674,8 @@ static int devm_drm_dev_init(struct device *parent,
if (ret)
return ret;
- ret = devm_add_action(parent, devm_drm_dev_init_release, dev);
- if (ret)
- devm_drm_dev_init_release(dev);
-
- return ret;
+ return devm_add_action_or_reset(parent,
+ devm_drm_dev_init_release, dev);
}
void *__devm_drm_dev_alloc(struct device *parent,
@@ -897,8 +893,6 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_register_all(dev);
- ret = 0;
-
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor,
driver->patchlevel, driver->date,
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index 4a475d9696ff..ff602f7ec65b 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -50,6 +50,33 @@ void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header)
EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
/**
+ * drm_dsc_dp_rc_buffer_size - get rc buffer size in bytes
+ * @rc_buffer_block_size: block size code, according to DPCD offset 62h
+ * @rc_buffer_size: number of blocks - 1, according to DPCD offset 63h
+ *
+ * return:
+ * buffer size in bytes, or 0 on invalid input
+ */
+int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size)
+{
+ int size = 1024 * (rc_buffer_size + 1);
+
+ switch (rc_buffer_block_size) {
+ case DP_DSC_RC_BUF_BLK_SIZE_1:
+ return 1 * size;
+ case DP_DSC_RC_BUF_BLK_SIZE_4:
+ return 4 * size;
+ case DP_DSC_RC_BUF_BLK_SIZE_16:
+ return 16 * size;
+ case DP_DSC_RC_BUF_BLK_SIZE_64:
+ return 64 * size;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_dsc_dp_rc_buffer_size);
+
+/**
* drm_dsc_pps_payload_pack() - Populates the DSC PPS
*
* @pps_payload:
@@ -186,8 +213,7 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
pps_payload->flatness_max_qp = dsc_cfg->flatness_max_qp;
/* PPS 38, 39 */
- pps_payload->rc_model_size =
- cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
+ pps_payload->rc_model_size = cpu_to_be16(dsc_cfg->rc_model_size);
/* PPS 40 */
pps_payload->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
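drm_dsc_dp_rc_buffer_size() above turns the two DSC DPCD fields into a byte count. A hedged usage sketch, assuming dsc_dpcd holds the receiver capability block starting at DP_DSC_SUPPORT:

#include <drm/drm_dp_helper.h>
#include <drm/drm_dsc.h>

static int my_dsc_rc_buffer_bytes(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
	u8 blk_size = dsc_dpcd[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT];
	u8 num_blks = dsc_dpcd[DP_DSC_RC_BUF_SIZE - DP_DSC_SUPPORT];

	/* returns 0 when the block-size code is not one of the defined values */
	return drm_dsc_dp_rc_buffer_size(blk_size, num_blks);
}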
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index d18a740fe0f1..ad17fa21cebb 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -29,6 +29,7 @@
#include <drm/drm_mode.h>
#include "drm_crtc_internal.h"
+#include "drm_internal.h"
/**
* DOC: overview
@@ -46,9 +47,10 @@
* KMS frame buffers.
*
* To support dumb objects drivers must implement the &drm_driver.dumb_create
- * operation. &drm_driver.dumb_destroy defaults to drm_gem_dumb_destroy() if
- * not set and &drm_driver.dumb_map_offset defaults to
- * drm_gem_dumb_map_offset(). See the callbacks for further details.
+ * and &drm_driver.dumb_map_offset operations (the latter defaults to
+ * drm_gem_dumb_map_offset() if not set). Drivers that don't use GEM handles
+ * additionally need to implement the &drm_driver.dumb_destroy operation. See
+ * the callbacks for further details.
*
* Note that dumb objects may not be used for gpu acceleration, as has been
* attempted on some ARM embedded platforms. Such drivers really must have
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index e95cce8e736d..c2bbe7bee7b6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -32,6 +32,7 @@
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
@@ -2075,9 +2076,13 @@ EXPORT_SYMBOL(drm_get_edid);
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
- struct pci_dev *pdev = connector->dev->pdev;
+ struct drm_device *dev = connector->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct edid *edid;
+ if (drm_WARN_ON_ONCE(dev, !dev_is_pci(dev->dev)))
+ return NULL;
+
vga_switcheroo_lock_ddc(pdev);
edid = drm_get_edid(connector, adapter);
vga_switcheroo_unlock_ddc(pdev);
@@ -4851,6 +4856,41 @@ static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db)
info->rgb_quant_range_selectable = true;
}
+static
+void drm_get_max_frl_rate(int max_frl_rate, u8 *max_lanes, u8 *max_rate_per_lane)
+{
+ switch (max_frl_rate) {
+ case 1:
+ *max_lanes = 3;
+ *max_rate_per_lane = 3;
+ break;
+ case 2:
+ *max_lanes = 3;
+ *max_rate_per_lane = 6;
+ break;
+ case 3:
+ *max_lanes = 4;
+ *max_rate_per_lane = 6;
+ break;
+ case 4:
+ *max_lanes = 4;
+ *max_rate_per_lane = 8;
+ break;
+ case 5:
+ *max_lanes = 4;
+ *max_rate_per_lane = 10;
+ break;
+ case 6:
+ *max_lanes = 4;
+ *max_rate_per_lane = 12;
+ break;
+ case 0:
+ default:
+ *max_lanes = 0;
+ *max_rate_per_lane = 0;
+ }
+}
+
static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
const u8 *db)
{
@@ -4904,6 +4944,74 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
}
}
+ if (hf_vsdb[7]) {
+ u8 max_frl_rate;
+ u8 dsc_max_frl_rate;
+ u8 dsc_max_slices;
+ struct drm_hdmi_dsc_cap *hdmi_dsc = &hdmi->dsc_cap;
+
+ DRM_DEBUG_KMS("hdmi_21 sink detected. parsing edid\n");
+ max_frl_rate = (hf_vsdb[7] & DRM_EDID_MAX_FRL_RATE_MASK) >> 4;
+ drm_get_max_frl_rate(max_frl_rate, &hdmi->max_lanes,
+ &hdmi->max_frl_rate_per_lane);
+ hdmi_dsc->v_1p2 = hf_vsdb[11] & DRM_EDID_DSC_1P2;
+
+ if (hdmi_dsc->v_1p2) {
+ hdmi_dsc->native_420 = hf_vsdb[11] & DRM_EDID_DSC_NATIVE_420;
+ hdmi_dsc->all_bpp = hf_vsdb[11] & DRM_EDID_DSC_ALL_BPP;
+
+ if (hf_vsdb[11] & DRM_EDID_DSC_16BPC)
+ hdmi_dsc->bpc_supported = 16;
+ else if (hf_vsdb[11] & DRM_EDID_DSC_12BPC)
+ hdmi_dsc->bpc_supported = 12;
+ else if (hf_vsdb[11] & DRM_EDID_DSC_10BPC)
+ hdmi_dsc->bpc_supported = 10;
+ else
+ hdmi_dsc->bpc_supported = 0;
+
+ dsc_max_frl_rate = (hf_vsdb[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
+ drm_get_max_frl_rate(dsc_max_frl_rate, &hdmi_dsc->max_lanes,
+ &hdmi_dsc->max_frl_rate_per_lane);
+ hdmi_dsc->total_chunk_kbytes = hf_vsdb[13] & DRM_EDID_DSC_TOTAL_CHUNK_KBYTES;
+
+ dsc_max_slices = hf_vsdb[12] & DRM_EDID_DSC_MAX_SLICES;
+ switch (dsc_max_slices) {
+ case 1:
+ hdmi_dsc->max_slices = 1;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 2:
+ hdmi_dsc->max_slices = 2;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 3:
+ hdmi_dsc->max_slices = 4;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 4:
+ hdmi_dsc->max_slices = 8;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 5:
+ hdmi_dsc->max_slices = 8;
+ hdmi_dsc->clk_per_slice = 400;
+ break;
+ case 6:
+ hdmi_dsc->max_slices = 12;
+ hdmi_dsc->clk_per_slice = 400;
+ break;
+ case 7:
+ hdmi_dsc->max_slices = 16;
+ hdmi_dsc->clk_per_slice = 400;
+ break;
+ case 0:
+ default:
+ hdmi_dsc->max_slices = 0;
+ hdmi_dsc->clk_per_slice = 0;
+ }
+ }
+ }
+
drm_parse_ycbcr420_deep_color_info(connector, hf_vsdb);
}
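The drm_edid.c hunk above fills &drm_display_info.hdmi (including hdmi.dsc_cap) from the HDMI Forum VSDB. A hedged sketch of how a source driver could read the result once the EDID has been parsed (my_hdmi_sink_max_frl is an assumption):

#include <drm/drm_connector.h>

/* total FRL bandwidth advertised by the HDMI 2.1 sink, in Gbps (0 if none) */
static int my_hdmi_sink_max_frl(struct drm_connector *connector)
{
	const struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;

	return hdmi->max_lanes * hdmi->max_frl_rate_per_lane;
}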
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index e555281f43d4..72e982323a5e 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -26,6 +26,7 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_managed.h>
#include "drm_crtc_internal.h"
@@ -72,7 +73,7 @@ int drm_encoder_register_all(struct drm_device *dev)
int ret = 0;
drm_for_each_encoder(encoder, dev) {
- if (encoder->funcs->late_register)
+ if (encoder->funcs && encoder->funcs->late_register)
ret = encoder->funcs->late_register(encoder);
if (ret)
return ret;
@@ -86,30 +87,16 @@ void drm_encoder_unregister_all(struct drm_device *dev)
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, dev) {
- if (encoder->funcs->early_unregister)
+ if (encoder->funcs && encoder->funcs->early_unregister)
encoder->funcs->early_unregister(encoder);
}
}
-/**
- * drm_encoder_init - Init a preallocated encoder
- * @dev: drm device
- * @encoder: the encoder to init
- * @funcs: callbacks for this encoder
- * @encoder_type: user visible type of the encoder
- * @name: printf style format string for the encoder name, or NULL for default name
- *
- * Initialises a preallocated encoder. Encoder should be subclassed as part of
- * driver encoder objects. At driver unload time drm_encoder_cleanup() should be
- * called from the driver's &drm_encoder_funcs.destroy hook.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_encoder_init(struct drm_device *dev,
- struct drm_encoder *encoder,
- const struct drm_encoder_funcs *funcs,
- int encoder_type, const char *name, ...)
+__printf(5, 0)
+static int __drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, va_list ap)
{
int ret;
@@ -125,11 +112,7 @@ int drm_encoder_init(struct drm_device *dev,
encoder->encoder_type = encoder_type;
encoder->funcs = funcs;
if (name) {
- va_list ap;
-
- va_start(ap, name);
encoder->name = kvasprintf(GFP_KERNEL, name, ap);
- va_end(ap);
} else {
encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
drm_encoder_enum_list[encoder_type].name,
@@ -150,6 +133,44 @@ out_put:
return ret;
}
+
+/**
+ * drm_encoder_init - Init a preallocated encoder
+ * @dev: drm device
+ * @encoder: the encoder to init
+ * @funcs: callbacks for this encoder
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * Initializes a preallocated encoder. Encoder should be subclassed as part of
+ * driver encoder objects. At driver unload time the driver's
+ * &drm_encoder_funcs.destroy hook should call drm_encoder_cleanup() and kfree()
+ * the encoder structure. The encoder structure should not be allocated with
+ * devm_kzalloc().
+ *
+ * Note: consider using drmm_encoder_alloc() instead of drm_encoder_init() to
+ * let the DRM managed resource infrastructure take care of cleanup and
+ * deallocation.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, ...)
+{
+ va_list ap;
+ int ret;
+
+ WARN_ON(!funcs->destroy);
+
+ va_start(ap, name);
+ ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
+ va_end(ap);
+
+ return ret;
+}
EXPORT_SYMBOL(drm_encoder_init);
/**
@@ -181,6 +202,48 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
}
EXPORT_SYMBOL(drm_encoder_cleanup);
+static void drmm_encoder_alloc_release(struct drm_device *dev, void *ptr)
+{
+ struct drm_encoder *encoder = ptr;
+
+ if (WARN_ON(!encoder->dev))
+ return;
+
+ drm_encoder_cleanup(encoder);
+}
+
+void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, ...)
+{
+ void *container;
+ struct drm_encoder *encoder;
+ va_list ap;
+ int ret;
+
+ if (WARN_ON(funcs && funcs->destroy))
+ return ERR_PTR(-EINVAL);
+
+ container = drmm_kzalloc(dev, size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ encoder = container + offset;
+
+ va_start(ap, name);
+ ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
+ va_end(ap);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drmm_add_action_or_reset(dev, drmm_encoder_alloc_release, encoder);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return container;
+}
+EXPORT_SYMBOL(__drmm_encoder_alloc);
+
static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
{
struct drm_connector *connector;
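__drmm_encoder_alloc() above backs the drmm_encoder_alloc() macro in <drm/drm_encoder.h>. A hedged sketch of allocating a managed encoder (struct my_encoder and the TMDS type are assumptions; funcs may be NULL, or must not provide .destroy):

#include <drm/drm_encoder.h>
#include <drm/drm_managed.h>

struct my_encoder {
	struct drm_encoder base;
	/* driver-private encoder state would follow */
};

static struct my_encoder *my_encoder_create(struct drm_device *drm)
{
	/* NULL funcs: cleanup is handled by the DRM managed release action */
	return drmm_encoder_alloc(drm, struct my_encoder, base, NULL,
				  DRM_MODE_ENCODER_TMDS, NULL);
}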
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4b8119510687..a44c3a438059 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -946,11 +946,15 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
drm_modeset_lock_all(fb_helper->dev);
drm_client_for_each_modeset(modeset, &fb_helper->client) {
crtc = modeset->crtc;
- if (!crtc->funcs->gamma_set || !crtc->gamma_size)
- return -EINVAL;
+ if (!crtc->funcs->gamma_set || !crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (cmap->start + cmap->len > crtc->gamma_size)
- return -EINVAL;
+ if (cmap->start + cmap->len > crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
r = crtc->gamma_store;
g = r + crtc->gamma_size;
@@ -963,8 +967,9 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
ret = crtc->funcs->gamma_set(crtc, r, g, b,
crtc->gamma_size, NULL);
if (ret)
- return ret;
+ goto out;
}
+out:
drm_modeset_unlock_all(fb_helper->dev);
return ret;
@@ -1054,6 +1059,11 @@ retry:
goto out_state;
}
+ /*
+ * FIXME: This always uses gamma_lut. Some HW have only
+ * degamma_lut, in which case we should reset gamma_lut and set
+ * degamma_lut. See drm_crtc_legacy_gamma_set().
+ */
replaced = drm_property_replace_blob(&crtc_state->degamma_lut,
NULL);
replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
@@ -2486,6 +2496,11 @@ void drm_fbdev_generic_setup(struct drm_device *dev,
return;
}
+ /*
+ * FIXME: This mixes up depth with bpp, which results in a glorious
+ * mess: some drivers pick wrong fbdev defaults and others pick wrong
+ * preferred_depth defaults.
+ */
if (!preferred_bpp)
preferred_bpp = dev->mode_config.preferred_depth;
if (!preferred_bpp)
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index b50380fa80ce..6b116bfd747c 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -113,8 +113,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
* The memory mapping implementation will vary depending on how the driver
* manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
* function, modern drivers should use one of the provided memory-manager
- * specific implementations. For GEM-based drivers this is drm_gem_mmap(), and
- * for drivers which use the CMA GEM helpers it's drm_gem_cma_mmap().
+ * specific implementations. For GEM-based drivers this is drm_gem_mmap().
*
* No other file operations are supported by the DRM userspace API. Overall the
* following is an example &file_operations structure::
@@ -240,9 +239,6 @@ static void drm_events_release(struct drm_file *file_priv)
* before calling this.
*
* If NULL is passed, this is a no-op.
- *
- * RETURNS:
- * 0 on success, or error code on failure.
*/
void drm_file_free(struct drm_file *file)
{
@@ -371,6 +367,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->filelist_mutex);
+#ifdef CONFIG_DRM_LEGACY
#ifdef __alpha__
/*
* Default the hose
@@ -391,6 +388,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
}
}
#endif
+#endif
return 0;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 92f89cee213e..c2ce78c4edc3 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -335,22 +335,12 @@ out:
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
-/**
- * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
- * @file: drm file-private structure to remove the dumb handle from
- * @dev: corresponding drm_device
- * @handle: the dumb handle to remove
- *
- * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
- * which use gem to manage their backing storage.
- */
int drm_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
- uint32_t handle)
+ u32 handle)
{
return drm_gem_handle_delete(file, handle);
}
-EXPORT_SYMBOL(drm_gem_dumb_destroy);
/**
* drm_gem_handle_create_tail - internal functions to create a handle
@@ -1078,20 +1068,17 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
drm_gem_object_get(obj);
vma->vm_private_data = obj;
+ vma->vm_ops = obj->funcs->vm_ops;
if (obj->funcs->mmap) {
ret = obj->funcs->mmap(obj, vma);
- if (ret) {
- drm_gem_object_put(obj);
- return ret;
- }
+ if (ret)
+ goto err_drm_gem_object_put;
WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
} else {
- if (obj->funcs->vm_ops)
- vma->vm_ops = obj->funcs->vm_ops;
- else {
- drm_gem_object_put(obj);
- return -EINVAL;
+ if (!vma->vm_ops) {
+ ret = -EINVAL;
+ goto err_drm_gem_object_put;
}
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
@@ -1100,6 +1087,10 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
}
return 0;
+
+err_drm_gem_object_put:
+ drm_gem_object_put(obj);
+ return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 4d5c1d86b022..7942cf05cd93 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -36,8 +36,9 @@
static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
.free = drm_gem_cma_free_object,
.print_info = drm_gem_cma_print_info,
- .get_sg_table = drm_gem_cma_prime_get_sg_table,
- .vmap = drm_gem_cma_prime_vmap,
+ .get_sg_table = drm_gem_cma_get_sg_table,
+ .vmap = drm_gem_cma_vmap,
+ .mmap = drm_gem_cma_mmap,
.vm_ops = &drm_gem_cma_vm_ops,
};
@@ -277,62 +278,6 @@ const struct vm_operations_struct drm_gem_cma_vm_ops = {
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
-static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
- struct vm_area_struct *vma)
-{
- int ret;
-
- /*
- * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
- * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
- * the whole buffer.
- */
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_pgoff = 0;
-
- ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
- cma_obj->paddr, vma->vm_end - vma->vm_start);
- if (ret)
- drm_gem_vm_close(vma);
-
- return ret;
-}
-
-/**
- * drm_gem_cma_mmap - memory-map a CMA GEM object
- * @filp: file object
- * @vma: VMA for the area to be mapped
- *
- * This function implements an augmented version of the GEM DRM file mmap
- * operation for CMA objects: In addition to the usual GEM VMA setup it
- * immediately faults in the entire object instead of using on-demaind
- * faulting. Drivers which employ the CMA helpers should use this function
- * as their ->mmap() handler in the DRM device file's file_operations
- * structure.
- *
- * Instead of directly referencing this function, drivers should use the
- * DEFINE_DRM_GEM_CMA_FOPS().macro.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_gem_cma_object *cma_obj;
- struct drm_gem_object *gem_obj;
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- gem_obj = vma->vm_private_data;
- cma_obj = to_drm_gem_cma_obj(gem_obj);
-
- return drm_gem_cma_mmap_obj(cma_obj, vma);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
-
#ifndef CONFIG_MMU
/**
* drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
@@ -424,18 +369,18 @@ void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
EXPORT_SYMBOL(drm_gem_cma_print_info);
/**
- * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
+ * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
* pages for a CMA GEM object
* @obj: GEM object
*
- * This function exports a scatter/gather table suitable for PRIME usage by
+ * This function exports a scatter/gather table by
* calling the standard DMA mapping API. Drivers using the CMA helpers should
* set this as their &drm_gem_object_funcs.get_sg_table callback.
*
* Returns:
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
*/
-struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
+struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj)
{
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
struct sg_table *sgt;
@@ -456,7 +401,7 @@ out:
kfree(sgt);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
+EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);
/**
* drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
@@ -501,40 +446,13 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
/**
- * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
- * @obj: GEM object
- * @vma: VMA for the area to be mapped
- *
- * This function maps a buffer imported via DRM PRIME into a userspace
- * process's address space. Drivers that use the CMA helpers should set this
- * as their &drm_driver.gem_prime_mmap callback.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- struct drm_gem_cma_object *cma_obj;
- int ret;
-
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret < 0)
- return ret;
-
- cma_obj = to_drm_gem_cma_obj(obj);
- return drm_gem_cma_mmap_obj(cma_obj, vma);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
-
-/**
- * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
+ * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
* address space
* @obj: GEM object
* @map: Returns the kernel virtual address of the CMA GEM object's backing
* store.
*
- * This function maps a buffer exported via DRM PRIME into the kernel's
+ * This function maps a buffer into the kernel's
* virtual address space. Since the CMA buffers are already mapped into the
* kernel virtual address space this simply returns the cached virtual
* address. Drivers using the CMA helpers should set this as their DRM
@@ -543,7 +461,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
* Returns:
* 0 on success, or a negative error code otherwise.
*/
-int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
+int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
@@ -551,7 +469,44 @@ int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
return 0;
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
+EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);
+
+/**
+ * drm_gem_cma_mmap - memory-map an exported CMA GEM object
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function maps a buffer into a userspace process's address space.
+ * In addition to the usual GEM VMA setup it immediately faults in the entire
+ * object instead of using on-demand faulting. Drivers that use the CMA
+ * helpers should set this as their &drm_gem_object_funcs.mmap callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct drm_gem_cma_object *cma_obj;
+ int ret;
+
+ /*
+ * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
+ * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
+ * the whole buffer.
+ */
+ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+ vma->vm_flags &= ~VM_PFNMAP;
+
+ cma_obj = to_drm_gem_cma_obj(obj);
+
+ ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
+ cma_obj->paddr, vma->vm_end - vma->vm_start);
+ if (ret)
+ drm_gem_vm_close(vma);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
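For orientation only (a hedged sketch; most drivers simply rely on the default funcs table shown at the top of this file's hunk), a CMA-based driver that provides its own &drm_gem_object_funcs would now wire the object-level mmap callback like this:

    /* hypothetical object funcs mirroring drm_gem_cma_default_funcs */
    static const struct drm_gem_object_funcs foo_gem_funcs = {
            .free         = drm_gem_cma_free_object,
            .print_info   = drm_gem_cma_print_info,
            .get_sg_table = drm_gem_cma_get_sg_table,
            .vmap         = drm_gem_cma_vmap,
            .mmap         = drm_gem_cma_mmap,
            .vm_ops       = &drm_gem_cma_vm_ops,
    };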
/**
* drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 02ca22e90290..0b232a73c1b7 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -387,9 +387,16 @@ static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
if (gbo->vmap_use_count > 0)
goto out;
- ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
- if (ret)
- return ret;
+ /*
+ * VRAM helpers unmap the BO only on demand. So the previous
+ * page mapping might still be around. Only vmap if there's
+ * no mapping present.
+ */
+ if (dma_buf_map_is_null(&gbo->map)) {
+ ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
+ if (ret)
+ return ret;
+ }
out:
++gbo->vmap_use_count;
@@ -577,6 +584,7 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
return;
ttm_bo_vunmap(bo, &gbo->map);
+ dma_buf_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */
}
static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 81d386b5b92a..fad2249ee67b 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -191,6 +191,9 @@ void drm_gem_unpin(struct drm_gem_object *obj);
int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
+int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ u32 handle);
+
/* drm_debugfs.c drm_debugfs_crc.c */
#if defined(CONFIG_DEBUG_FS)
int drm_debugfs_init(struct drm_minor *minor, int minor_id,
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 09d6e9e2e075..c3bd664ea733 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -122,7 +122,7 @@ int drm_irq_install(struct drm_device *dev, int irq)
dev->driver->irq_preinstall(dev);
/* PCI devices require shared interrupts. */
- if (dev->pdev)
+ if (dev_is_pci(dev->dev))
sh_flags = IRQF_SHARED;
ret = request_irq(irq, dev->driver->irq_handler,
@@ -140,7 +140,7 @@ int drm_irq_install(struct drm_device *dev, int irq)
if (ret < 0) {
dev->irq_enabled = false;
if (drm_core_check_feature(dev, DRIVER_LEGACY))
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ vga_client_register(to_pci_dev(dev->dev), NULL, NULL, NULL);
free_irq(irq, dev);
} else {
dev->irq = irq;
@@ -203,7 +203,7 @@ int drm_irq_uninstall(struct drm_device *dev)
DRM_DEBUG("irq=%d\n", dev->irq);
if (drm_core_check_feature(dev, DRIVER_LEGACY))
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ vga_client_register(to_pci_dev(dev->dev), NULL, NULL, NULL);
if (dev->driver->irq_uninstall)
dev->driver->irq_uninstall(dev);
@@ -214,12 +214,45 @@ int drm_irq_uninstall(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_irq_uninstall);
+static void devm_drm_irq_uninstall(void *data)
+{
+ drm_irq_uninstall(data);
+}
+
+/**
+ * devm_drm_irq_install - install IRQ handler
+ * @dev: DRM device
+ * @irq: IRQ number to install the handler for
+ *
+ * devm_drm_irq_install() is a managed variant of drm_irq_install().
+ *
+ * If the driver uses devm_drm_irq_install(), there is no need to call
+ * drm_irq_uninstall() when the DRM module gets unloaded, as this is
+ * done automatically.
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
+ */
+int devm_drm_irq_install(struct drm_device *dev, int irq)
+{
+ int ret;
+
+ ret = drm_irq_install(dev, irq);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev->dev,
+ devm_drm_irq_uninstall, dev);
+}
+EXPORT_SYMBOL(devm_drm_irq_install);
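A minimal probe-time sketch, assuming a PCI-backed driver (the foo_* name is illustrative):

    static int foo_install_irq(struct drm_device *drm, struct pci_dev *pdev)
    {
            /* the matching drm_irq_uninstall() runs automatically via devm */
            return devm_drm_irq_install(drm, pdev->irq);
    }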
+
#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_control *ctl = data;
int ret = 0, irq;
+ struct pci_dev *pdev;
/* if we haven't irq we fallback for compatibility reasons -
* this used to be a separate function in drm_dma.h
@@ -230,12 +263,13 @@ int drm_legacy_irq_control(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return 0;
/* UMS was only ever supported on pci devices. */
- if (WARN_ON(!dev->pdev))
+ if (WARN_ON(!dev_is_pci(dev->dev)))
return -EINVAL;
switch (ctl->func) {
case DRM_INST_HANDLER:
- irq = dev->pdev->irq;
+ pdev = to_pci_dev(dev->dev);
+ irq = pdev->irq;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl->irq != irq)
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index 1be3ea320474..f71358f9eac9 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -127,7 +127,7 @@ static inline void drm_legacy_master_rmmaps(struct drm_device *dev,
static inline void drm_legacy_rmmaps(struct drm_device *dev) {}
#endif
-#if IS_ENABLED(CONFIG_DRM_VM) && IS_ENABLED(CONFIG_DRM_LEGACY)
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *d);
#else
static inline void drm_legacy_vma_flush(struct drm_device *d)
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index fbea69d6f909..e4f20a2eb6e7 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -37,7 +37,6 @@
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
-#include <xen/xen.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_cache.h>
@@ -100,24 +99,6 @@ static void *agp_remap(unsigned long offset, unsigned long size,
return addr;
}
-/** Wrapper around agp_free_memory() */
-void drm_free_agp(struct agp_memory *handle, int pages)
-{
- agp_free_memory(handle);
-}
-
-/** Wrapper around agp_bind_memory() */
-int drm_bind_agp(struct agp_memory *handle, unsigned int start)
-{
- return agp_bind_memory(handle, start);
-}
-
-/** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(struct agp_memory *handle)
-{
- return agp_unbind_memory(handle);
-}
-
#else /* CONFIG_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,
struct drm_device *dev)
@@ -156,35 +137,3 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
iounmap(map->handle);
}
EXPORT_SYMBOL(drm_legacy_ioremapfree);
-
-bool drm_need_swiotlb(int dma_bits)
-{
- struct resource *tmp;
- resource_size_t max_iomem = 0;
-
- /*
- * Xen paravirtual hosts require swiotlb regardless of requested dma
- * transfer size.
- *
- * NOTE: Really, what it requires is use of the dma_alloc_coherent
- * allocator used in ttm_dma_populate() instead of
- * ttm_populate_and_map_pages(), which bounce buffers so much in
- * Xen it leads to swiotlb buffer exhaustion.
- */
- if (xen_pv_domain())
- return true;
-
- /*
- * Enforce dma_alloc_coherent when memory encryption is active as well
- * for the same reasons as for Xen paravirtual hosts.
- */
- if (mem_encrypt_active())
- return true;
-
- for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
- max_iomem = max(max_iomem, tmp->end);
- }
-
- return max_iomem > ((u64)1 << dma_bits);
-}
-EXPORT_SYMBOL(drm_need_swiotlb);
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index f1affc1bb679..37b4b9f0e468 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -195,7 +195,7 @@ void drm_mode_config_reset(struct drm_device *dev)
crtc->funcs->reset(crtc);
drm_for_each_encoder(encoder, dev)
- if (encoder->funcs->reset)
+ if (encoder->funcs && encoder->funcs->reset)
encoder->funcs->reset(encoder);
drm_connector_list_iter_begin(dev, &conn_iter);
@@ -625,6 +625,10 @@ static void validate_encoder_possible_crtcs(struct drm_encoder *encoder)
void drm_mode_config_validate(struct drm_device *dev)
{
struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ u32 primary_with_crtc = 0, cursor_with_crtc = 0;
+ unsigned int num_primary = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
@@ -636,4 +640,49 @@ void drm_mode_config_validate(struct drm_device *dev)
validate_encoder_possible_clones(encoder);
validate_encoder_possible_crtcs(encoder);
}
+
+ drm_for_each_crtc(crtc, dev) {
+ WARN(!crtc->primary, "Missing primary plane on [CRTC:%d:%s]\n",
+ crtc->base.id, crtc->name);
+
+ WARN(crtc->cursor && crtc->funcs->cursor_set,
+ "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set func",
+ crtc->base.id, crtc->name);
+ WARN(crtc->cursor && crtc->funcs->cursor_set2,
+ "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set2 func",
+ crtc->base.id, crtc->name);
+ WARN(crtc->cursor && crtc->funcs->cursor_move,
+ "[CRTC:%d:%s] must not have both a cursor plane and a cursor_move func",
+ crtc->base.id, crtc->name);
+
+ if (crtc->primary) {
+ WARN(!(crtc->primary->possible_crtcs & drm_crtc_mask(crtc)),
+ "Bogus primary plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n",
+ crtc->primary->base.id, crtc->primary->name,
+ crtc->base.id, crtc->name);
+ WARN(primary_with_crtc & drm_plane_mask(crtc->primary),
+ "Primary plane [PLANE:%d:%s] used for multiple CRTCs",
+ crtc->primary->base.id, crtc->primary->name);
+ primary_with_crtc |= drm_plane_mask(crtc->primary);
+ }
+ if (crtc->cursor) {
+ WARN(!(crtc->cursor->possible_crtcs & drm_crtc_mask(crtc)),
+ "Bogus cursor plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n",
+ crtc->cursor->base.id, crtc->cursor->name,
+ crtc->base.id, crtc->name);
+ WARN(cursor_with_crtc & drm_plane_mask(crtc->cursor),
+ "Cursor plane [PLANE:%d:%s] used for multiple CRTCs",
+ crtc->cursor->base.id, crtc->cursor->name);
+ cursor_with_crtc |= drm_plane_mask(crtc->cursor);
+ }
+ }
+
+ drm_for_each_plane(plane, dev) {
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ num_primary++;
+ }
+
+ WARN(num_primary != dev->mode_config.num_crtc,
+ "Must have as many primary planes as there are CRTCs, but have %u primary planes and %u CRTCs",
+ num_primary, dev->mode_config.num_crtc);
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 33fb2f05ce66..1ac67d4505e0 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -762,7 +762,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
if (mode->htotal == 0 || mode->vtotal == 0)
return 0;
- num = mode->clock * 1000;
+ num = mode->clock;
den = mode->htotal * mode->vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -772,7 +772,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
if (mode->vscan > 1)
den *= mode->vscan;
- return DIV_ROUND_CLOSEST(num, den);
+ return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(num, 1000), den);
}
EXPORT_SYMBOL(drm_mode_vrefresh);
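The switch to 64-bit math matters because mode->clock is stored in kHz. As an illustrative example (mode numbers are approximate), an 8K@60 mode has a pixel clock on the order of 2,400,000 kHz, so the old 32-bit expression mode->clock * 1000 would overflow a signed int before the division; mul_u32_u32() widens the product to 64 bits and DIV_ROUND_CLOSEST_ULL() performs the rounding division on the 64-bit value:

    2,400,000 kHz * 1000 = 2,400,000,000 > INT_MAX (2,147,483,647)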
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 6dba4b8ce4fe..2294a1580d35 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -24,6 +24,8 @@
#include <linux/dma-mapping.h>
#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -36,6 +38,9 @@
#include "drm_legacy.h"
#ifdef CONFIG_DRM_LEGACY
+/* List of devices hanging off drivers with stealth attach. */
+static LIST_HEAD(legacy_dev_list);
+static DEFINE_MUTEX(legacy_dev_list_lock);
/**
* drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
@@ -65,7 +70,7 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
return NULL;
dmah->size = size;
- dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
+ dmah->vaddr = dma_alloc_coherent(dev->dev, size,
&dmah->busaddr,
GFP_KERNEL);
@@ -88,7 +93,7 @@ EXPORT_SYMBOL(drm_pci_alloc);
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
- dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+ dma_free_coherent(dev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
kfree(dmah);
}
@@ -107,16 +112,18 @@ static int drm_get_pci_domain(struct drm_device *dev)
return 0;
#endif /* __alpha__ */
- return pci_domain_nr(dev->pdev->bus);
+ return pci_domain_nr(to_pci_dev(dev->dev)->bus);
}
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
- dev->pdev->bus->number,
- PCI_SLOT(dev->pdev->devfn),
- PCI_FUNC(dev->pdev->devfn));
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
if (!master->unique)
return -ENOMEM;
@@ -126,12 +133,14 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
- (p->busnum & 0xff) != dev->pdev->bus->number ||
- p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
+ (p->busnum & 0xff) != pdev->bus->number ||
+ p->devnum != PCI_SLOT(pdev->devfn) || p->funcnum != PCI_FUNC(pdev->devfn))
return -EINVAL;
- p->irq = dev->pdev->irq;
+ p->irq = pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
@@ -159,7 +168,7 @@ int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
return -EOPNOTSUPP;
/* UMS was only ever support on PCI devices. */
- if (WARN_ON(!dev->pdev))
+ if (WARN_ON(!dev_is_pci(dev->dev)))
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
@@ -183,7 +192,7 @@ void drm_pci_agp_destroy(struct drm_device *dev)
static void drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
- if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
+ if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP))
dev->agp = drm_agp_init(dev);
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
@@ -196,7 +205,7 @@ static void drm_pci_agp_init(struct drm_device *dev)
static int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
- struct drm_driver *driver)
+ const struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
@@ -225,10 +234,11 @@ static int drm_get_pci_dev(struct pci_dev *pdev,
if (ret)
goto err_agp;
- /* No locking needed since shadow-attach is single-threaded since it may
- * only be called from the per-driver module init hook. */
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);
+ if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
+ mutex_lock(&legacy_dev_list_lock);
+ list_add_tail(&dev->legacy_dev_list, &legacy_dev_list);
+ mutex_unlock(&legacy_dev_list_lock);
+ }
return 0;
@@ -249,7 +259,8 @@ err_free:
*
* Return: 0 on success or a negative error code on failure.
*/
-int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+int drm_legacy_pci_init(const struct drm_driver *driver,
+ struct pci_driver *pdriver)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *pid;
@@ -261,7 +272,6 @@ int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
return -EINVAL;
/* If not using KMS, fall back to stealth mode manual scanning. */
- INIT_LIST_HEAD(&driver->legacy_dev_list);
for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
pid = &pdriver->id_table[i];
@@ -295,7 +305,8 @@ EXPORT_SYMBOL(drm_legacy_pci_init);
* Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This
* is deprecated and only used by dri1 drivers.
*/
-void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+void drm_legacy_pci_exit(const struct drm_driver *driver,
+ struct pci_driver *pdriver)
{
struct drm_device *dev, *tmp;
@@ -304,11 +315,15 @@ void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
if (!(driver->driver_features & DRIVER_LEGACY)) {
WARN_ON(1);
} else {
- list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
+ mutex_lock(&legacy_dev_list_lock);
+ list_for_each_entry_safe(dev, tmp, &legacy_dev_list,
legacy_dev_list) {
- list_del(&dev->legacy_dev_list);
- drm_put_dev(dev);
+ if (dev->driver == driver) {
+ list_del(&dev->legacy_dev_list);
+ drm_put_dev(dev);
+ }
}
+ mutex_unlock(&legacy_dev_list_lock);
}
DRM_INFO("Module unloaded\n");
}
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index e6231947f987..338650abd267 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -30,6 +30,7 @@
#include <drm/drm_file.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_managed.h>
#include <drm/drm_vblank.h>
#include "drm_crtc_internal.h"
@@ -40,7 +41,7 @@
* A plane represents an image source that can be blended with or overlayed on
* top of a CRTC during the scanout process. Planes take their input data from a
* &drm_framebuffer object. The plane itself specifies the cropping and scaling
- * of that image, and where it is placed on the visible are of a display
+ * of that image, and where it is placed on the visible area of a display
* pipeline, represented by &drm_crtc. A plane can also have additional
* properties that specify how the pixels are positioned and blended, like
* rotation or Z-position. All these properties are stored in &drm_plane_state.
@@ -49,14 +50,34 @@
* &struct drm_plane (possibly as part of a larger structure) and registers it
* with a call to drm_universal_plane_init().
*
- * Cursor and overlay planes are optional. All drivers should provide one
- * primary plane per CRTC to avoid surprising userspace too much. See enum
- * drm_plane_type for a more in-depth discussion of these special uapi-relevant
- * plane types. Special planes are associated with their CRTC by calling
- * drm_crtc_init_with_planes().
- *
* The type of a plane is exposed in the immutable "type" enumeration property,
- * which has one of the following values: "Overlay", "Primary", "Cursor".
+ * which has one of the following values: "Overlay", "Primary", "Cursor" (see
+ * enum drm_plane_type). A plane can be compatible with multiple CRTCs, see
+ * &drm_plane.possible_crtcs.
+ *
+ * Each CRTC must have a unique primary plane userspace can attach to enable
+ * the CRTC. In other words, userspace must be able to attach a different
+ * primary plane to each CRTC at the same time. Primary planes can still be
+ * compatible with multiple CRTCs. There must be exactly as many primary planes
+ * as there are CRTCs.
+ *
+ * Legacy uAPI doesn't expose the primary and cursor planes directly. DRM core
+ * relies on the driver to set the primary and optionally the cursor plane used
+ * for legacy IOCTLs. This is done by calling drm_crtc_init_with_planes(). All
+ * drivers must provide one primary plane per CRTC to avoid surprising legacy
+ * userspace too much.
+ */
+
+/**
+ * DOC: standard plane properties
+ *
+ * DRM planes have a few standardized properties:
+ *
+ * IN_FORMATS:
+ * Blob property which contains the set of buffer format and modifier
+ * pairs supported by this plane. The blob is a struct
+ * drm_format_modifier_blob. Without this property the plane doesn't
+ * support buffers with modifiers. Userspace cannot change this property.
*/
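As a hedged illustration of the per-CRTC primary-plane rule described above (the foo_* names, format list and possible_crtcs mask are assumptions for the example), a driver typically initializes one primary plane per pipe and binds it via drm_crtc_init_with_planes():

    static const uint32_t foo_primary_formats[] = {
            DRM_FORMAT_XRGB8888,
    };

    /* per-pipe init, with pipe_index matching the CRTC's index */
    ret = drm_universal_plane_init(drm, &pipe->primary, BIT(pipe_index),
                                   &foo_plane_funcs, foo_primary_formats,
                                   ARRAY_SIZE(foo_primary_formats), NULL,
                                   DRM_PLANE_TYPE_PRIMARY, NULL);
    if (ret)
            return ret;

    ret = drm_crtc_init_with_planes(drm, &pipe->crtc, &pipe->primary, NULL,
                                    &foo_crtc_funcs, NULL);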
static unsigned int drm_num_planes(struct drm_device *dev)
@@ -152,31 +173,16 @@ done:
return 0;
}
-/**
- * drm_universal_plane_init - Initialize a new universal plane object
- * @dev: DRM device
- * @plane: plane object to init
- * @possible_crtcs: bitmask of possible CRTCs
- * @funcs: callbacks for the new plane
- * @formats: array of supported formats (DRM_FORMAT\_\*)
- * @format_count: number of elements in @formats
- * @format_modifiers: array of struct drm_format modifiers terminated by
- * DRM_FORMAT_MOD_INVALID
- * @type: type of plane (overlay, primary, cursor)
- * @name: printf style format string for the plane name, or NULL for default name
- *
- * Initializes a plane object of type @type.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
- uint32_t possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- const uint64_t *format_modifiers,
- enum drm_plane_type type,
- const char *name, ...)
+__printf(9, 0)
+static int __drm_universal_plane_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type,
+ const char *name, va_list ap)
{
struct drm_mode_config *config = &dev->mode_config;
unsigned int format_modifier_count = 0;
@@ -237,11 +243,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
}
if (name) {
- va_list ap;
-
- va_start(ap, name);
plane->name = kvasprintf(GFP_KERNEL, name, ap);
- va_end(ap);
} else {
plane->name = kasprintf(GFP_KERNEL, "plane-%d",
drm_num_planes(dev));
@@ -286,8 +288,102 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
return 0;
}
+
+/**
+ * drm_universal_plane_init - Initialize a new universal plane object
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @format_modifiers: array of struct drm_format modifiers terminated by
+ * DRM_FORMAT_MOD_INVALID
+ * @type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Initializes a plane object of type @type. The &drm_plane_funcs.destroy hook
+ * should call drm_plane_cleanup() and kfree() the plane structure. The plane
+ * structure should not be allocated with devm_kzalloc().
+ *
+ * Note: consider using drmm_universal_plane_alloc() instead of
+ * drm_universal_plane_init() to let the DRM managed resource infrastructure
+ * take care of cleanup and deallocation.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type,
+ const char *name, ...)
+{
+ va_list ap;
+ int ret;
+
+ WARN_ON(!funcs->destroy);
+
+ va_start(ap, name);
+ ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+ formats, format_count, format_modifiers,
+ type, name, ap);
+ va_end(ap);
+ return ret;
+}
EXPORT_SYMBOL(drm_universal_plane_init);
+static void drmm_universal_plane_alloc_release(struct drm_device *dev, void *ptr)
+{
+ struct drm_plane *plane = ptr;
+
+ if (WARN_ON(!plane->dev))
+ return;
+
+ drm_plane_cleanup(plane);
+}
+
+void *__drmm_universal_plane_alloc(struct drm_device *dev, size_t size,
+ size_t offset, uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type,
+ const char *name, ...)
+{
+ void *container;
+ struct drm_plane *plane;
+ va_list ap;
+ int ret;
+
+ if (WARN_ON(!funcs || funcs->destroy))
+ return ERR_PTR(-EINVAL);
+
+ container = drmm_kzalloc(dev, size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ plane = container + offset;
+
+ va_start(ap, name);
+ ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+ formats, format_count, format_modifiers,
+ type, name, ap);
+ va_end(ap);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drmm_add_action_or_reset(dev, drmm_universal_plane_alloc_release,
+ plane);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return container;
+}
+EXPORT_SYMBOL(__drmm_universal_plane_alloc);
+
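A hedged sketch of the managed allocation this export enables (the drmm_universal_plane_alloc() wrapper macro lives in the headers and is not shown in this hunk; the foo_* names are assumptions):

    struct foo_plane {
            struct drm_plane base;
            /* driver-private state */
    };

    struct foo_plane *fp;

    fp = drmm_universal_plane_alloc(drm, struct foo_plane, base, BIT(0),
                                    &foo_plane_funcs, foo_primary_formats,
                                    ARRAY_SIZE(foo_primary_formats), NULL,
                                    DRM_PLANE_TYPE_PRIMARY, NULL);
    if (IS_ERR(fp))
            return PTR_ERR(fp);
    /* no destroy hook and no explicit drm_plane_cleanup(): the drmm action handles it */

Note that for the managed variant @funcs must not provide a destroy hook, as enforced by the WARN_ON above.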
int drm_plane_register_all(struct drm_device *dev)
{
unsigned int num_planes = 0;
@@ -1163,7 +1259,14 @@ retry:
if (ret)
goto out;
- if (old_fb->format != fb->format) {
+ /*
+ * Only check the FOURCC format code, excluding modifiers. This is
+ * enough for all legacy drivers. Atomic drivers have their own
+ * checks in their ->atomic_check implementation, which will
+ * return -EINVAL if any hw or driver constraint is violated due
+ * to modifier changes.
+ */
+ if (old_fb->format->format != fb->format->format) {
DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
ret = -EINVAL;
goto out;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7db55fce35d8..2a54f86856af 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -717,6 +717,8 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
if (obj->funcs && obj->funcs->mmap) {
+ vma->vm_ops = obj->funcs->vm_ops;
+
ret = obj->funcs->mmap(obj, vma);
if (ret)
return ret;
@@ -978,44 +980,58 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_prime_import);
/**
- * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
+ * drm_prime_sg_to_page_array - convert an sg table into a page array
+ * @sgt: scatter-gather table to convert
+ * @pages: array of page pointers to store the pages in
+ * @max_entries: size of the passed-in array
+ *
+ * Exports an sg table into an array of pages.
+ *
+ * This function is deprecated and its use is strongly discouraged.
+ * The page array is only useful for page faults, and those can corrupt
+ * fields in the struct page if they are not handled by the exporting driver.
+ */
+int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
+ struct page **pages,
+ int max_entries)
+{
+ struct sg_page_iter page_iter;
+ struct page **p = pages;
+
+ for_each_sgtable_page(sgt, &page_iter, 0) {
+ if (WARN_ON(p - pages >= max_entries))
+ return -1;
+ *p++ = sg_page_iter_page(&page_iter);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_prime_sg_to_page_array);
+
+/**
+ * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
* @sgt: scatter-gather table to convert
- * @pages: optional array of page pointers to store the page array in
- * @addrs: optional array to store the dma bus address of each page
+ * @addrs: array to store the dma bus address of each page
* @max_entries: size of both the passed-in arrays
*
- * Exports an sg table into an array of pages and addresses. This is currently
- * required by the TTM driver in order to do correct fault handling.
+ * Exports an sg table into an array of addresses.
*
- * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
+ * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
* implementation.
*/
-int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
- dma_addr_t *addrs, int max_entries)
+int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
+ int max_entries)
{
struct sg_dma_page_iter dma_iter;
- struct sg_page_iter page_iter;
- struct page **p = pages;
dma_addr_t *a = addrs;
- if (pages) {
- for_each_sgtable_page(sgt, &page_iter, 0) {
- if (WARN_ON(p - pages >= max_entries))
- return -1;
- *p++ = sg_page_iter_page(&page_iter);
- }
+ for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+ if (WARN_ON(a - addrs >= max_entries))
+ return -1;
+ *a++ = sg_page_iter_dma_address(&dma_iter);
}
- if (addrs) {
- for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
- if (WARN_ON(a - addrs >= max_entries))
- return -1;
- *a++ = sg_page_iter_dma_address(&dma_iter);
- }
- }
-
return 0;
}
-EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
+EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
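For orientation (a hypothetical import-side sketch; the object layout is assumed), a &drm_driver.gem_prime_import_sg_table implementation now calls the address-only helper directly:

    /* obj->dma_addrs is assumed to hold npages entries */
    ret = drm_prime_sg_to_dma_addr_array(sgt, obj->dma_addrs, npages);
    if (ret < 0)
            return ERR_PTR(-EINVAL);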
/**
* drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d6017726cc2a..ad59a51eab6d 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -515,7 +515,8 @@ retry:
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_override_edid_modes(connector);
- if (count == 0 && connector->status == connector_status_connected)
+ if (count == 0 && (connector->status == connector_status_connected ||
+ connector->status == connector_status_unknown))
count = drm_add_modes_noedid(connector, 1024, 768);
count += drm_helper_probe_add_cmdline_mode(connector);
if (count == 0)
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 743e57c1b44f..6ce8f5cd1eb5 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -9,6 +9,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_managed.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -55,8 +56,9 @@ static const struct drm_encoder_funcs drm_simple_encoder_funcs_cleanup = {
* stored in the device structure. Free the encoder's memory as part of
* the device release function.
*
- * FIXME: Later improvements to DRM's resource management may allow for
- * an automated kfree() of the encoder's memory.
+ * Note: consider using drmm_simple_encoder_alloc() instead of
+ * drm_simple_encoder_init() to let the DRM managed resource infrastructure
+ * take care of cleanup and deallocation.
*
* Returns:
* Zero on success, error code on failure.
@@ -71,6 +73,14 @@ int drm_simple_encoder_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_simple_encoder_init);
+void *__drmm_simple_encoder_alloc(struct drm_device *dev, size_t size,
+ size_t offset, int encoder_type)
+{
+ return __drmm_encoder_alloc(dev, size, offset, NULL, encoder_type,
+ NULL);
+}
+EXPORT_SYMBOL(__drmm_simple_encoder_alloc);
+
static enum drm_mode_status
drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 6e74e6745eca..349146049849 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -388,19 +388,18 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
return -ENOENT;
*fence = drm_syncobj_fence_get(syncobj);
- drm_syncobj_put(syncobj);
if (*fence) {
ret = dma_fence_chain_find_seqno(fence, point);
if (!ret)
- return 0;
+ goto out;
dma_fence_put(*fence);
} else {
ret = -EINVAL;
}
if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
- return ret;
+ goto out;
memset(&wait, 0, sizeof(wait));
wait.task = current;
@@ -432,6 +431,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
if (wait.node.next)
drm_syncobj_remove_wait(syncobj, &wait);
+out:
+ drm_syncobj_put(syncobj);
+
return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index d30e2f2b8f3c..30912d8f82a5 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -74,7 +74,7 @@
* |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓| updates the
* | | frame as it
* | | travels down
- * | | ("sacn out")
+ * | | ("scan out")
* | Old frame |
* | |
* | |
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 6d5a03b32238..9b3b989d7cad 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -278,7 +278,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
- dma_free_coherent(&dev->pdev->dev,
+ dma_free_coherent(dev->dev,
map->size,
map->handle,
map->offset);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index d9bd83203a15..b390dd4d60b7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -135,8 +135,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
goto fail;
}
- ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages,
- NULL, npages);
+ ret = drm_prime_sg_to_page_array(sgt, etnaviv_obj->pages, npages);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index e75293e4a52f..19e055dbd4c2 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -95,13 +95,14 @@ static u32 cdv_get_max_backlight(struct drm_device *dev)
static int cdv_get_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (cdv_backlight_combination_mode(dev)) {
u8 lbpc;
val &= ~1;
- pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
+ pci_read_config_byte(pdev, 0xF4, &lbpc);
val *= lbpc;
}
return (val * 100)/cdv_get_max_backlight(dev);
@@ -111,6 +112,7 @@ static int cdv_get_brightness(struct backlight_device *bd)
static int cdv_set_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int level = bd->props.brightness;
u32 blc_pwm_ctl;
@@ -128,7 +130,7 @@ static int cdv_set_brightness(struct backlight_device *bd)
lbpc = level * 0xfe / max + 1;
level /= lbpc;
- pci_write_config_byte(dev->pdev, 0xF4, lbpc);
+ pci_write_config_byte(pdev, 0xF4, lbpc);
}
blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
@@ -205,8 +207,9 @@ static inline void CDV_MSG_WRITE32(int domain, uint port, uint offset,
static void cdv_init_pm(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 pwr_cnt;
- int domain = pci_domain_nr(dev->pdev->bus);
+ int domain = pci_domain_nr(pdev->bus);
int i;
dev_priv->apm_base = CDV_MSG_READ32(domain, PSB_PUNIT_PORT,
@@ -234,6 +237,8 @@ static void cdv_init_pm(struct drm_device *dev)
static void cdv_errata(struct drm_device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
/* Disable bonus launch.
* CPU and GPU competes for memory and display misses updates and
* flickers. Worst with dual core, dual displays.
@@ -242,7 +247,7 @@ static void cdv_errata(struct drm_device *dev)
* Bonus Launch to work around the issue, by degrading
* performance.
*/
- CDV_MSG_WRITE32(pci_domain_nr(dev->pdev->bus), 3, 0x30, 0x08027108);
+ CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108);
}
/**
@@ -255,12 +260,13 @@ static void cdv_errata(struct drm_device *dev)
static int cdv_save_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
dev_dbg(dev->dev, "Saving GPU registers.\n");
- pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB);
+ pci_read_config_byte(pdev, 0xF4, &regs->cdv.saveLBB);
regs->cdv.saveDSPCLK_GATE_D = REG_READ(DSPCLK_GATE_D);
regs->cdv.saveRAMCLK_GATE_D = REG_READ(RAMCLK_GATE_D);
@@ -309,11 +315,12 @@ static int cdv_save_display_registers(struct drm_device *dev)
static int cdv_restore_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
u32 temp;
- pci_write_config_byte(dev->pdev, 0xF4, regs->cdv.saveLBB);
+ pci_write_config_byte(pdev, 0xF4, regs->cdv.saveLBB);
REG_WRITE(DSPCLK_GATE_D, regs->cdv.saveDSPCLK_GATE_D);
REG_WRITE(RAMCLK_GATE_D, regs->cdv.saveRAMCLK_GATE_D);
@@ -421,16 +428,16 @@ static int cdv_power_up(struct drm_device *dev)
static void cdv_hotplug_work_func(struct work_struct *work)
{
struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
- hotplug_work);
+ hotplug_work);
struct drm_device *dev = dev_priv->dev;
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
-}
+}
/* The core driver has received a hotplug IRQ. We are in IRQ context
so extract the needed information and kick off queued processing */
-
+
static int cdv_hotplug_event(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -449,7 +456,7 @@ static void cdv_hotplug_enable(struct drm_device *dev, bool on)
} else {
REG_WRITE(PORT_HOTPLUG_EN, 0);
REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
- }
+ }
}
static const char *force_audio_names[] = {
@@ -568,9 +575,10 @@ static const struct psb_offset cdv_regmap[2] = {
static int cdv_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
- if (pci_enable_msi(dev->pdev))
+ if (pci_enable_msi(pdev))
dev_warn(dev->dev, "Enabling MSI failed!\n");
dev_priv->regmap = cdv_regmap;
gma_get_core_freq(dev);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 88535f5aacc5..c48c9d322dfb 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -127,7 +127,7 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
}
-/**
+/*
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
*
* \return true if CRT is connected.
@@ -278,8 +278,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
i2c_reg, "CRTDDC_A");
if (!gma_encoder->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
- "failed.\n");
+ dev_printk(KERN_ERR, dev->dev, "DDC bus registration failed.\n");
goto failed_ddc;
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 686385a66167..5d3302249779 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -551,7 +551,7 @@ void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
}
}
-/**
+/*
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index bfd9a15d63b1..6d3ada39ff86 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -306,7 +306,7 @@ static uint32_t dp_vswing_premph_table[] = {
};
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
- * @intel_dp: DP struct
+ * @encoder: GMA encoder struct
*
* If a CPU or PCH DP output is attached to an eDP panel, this function
* will return true, and false otherwise.
@@ -1687,7 +1687,7 @@ static enum drm_connector_status cdv_dp_detect(struct gma_encoder *encoder)
return status;
}
-/**
+/*
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
*
* \return true if DP port is connected.
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index eaaf4efec217..5bff7d9e3aa6 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -74,7 +74,7 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
return retval;
}
-/**
+/*
* Sets the backlight level.
*
* level backlight level, from 0 to cdv_intel_lvds_get_max_backlight().
@@ -99,7 +99,7 @@ static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
}
}
-/**
+/*
* Sets the power state for the panel.
*/
static void cdv_intel_lvds_set_power(struct drm_device *dev,
@@ -291,7 +291,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
REG_WRITE(PFIT_CONTROL, pfit_control);
}
-/**
+/*
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
@@ -471,6 +471,7 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
/**
* cdv_intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
+ * @mode_dev: PSB mode device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
@@ -554,7 +555,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
"LVDSBLC_B");
if (!gma_encoder->i2c_bus) {
dev_printk(KERN_ERR,
- &dev->pdev->dev, "I2C bus registration failed.\n");
+ dev->dev, "I2C bus registration failed.\n");
goto failed_blc_i2c;
}
gma_encoder->i2c_bus->slave_addr = 0x2C;
@@ -575,7 +576,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
GPIOC,
"LVDSDDC_C");
if (!gma_encoder->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev,
+ dev_printk(KERN_ERR, dev->dev,
"DDC bus registration " "failed.\n");
goto failed_ddc;
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index fc4fda1d258b..ebe9dccf2d83 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -159,7 +159,7 @@ static const struct fb_ops psbfb_unaccel_ops = {
* @dev: our DRM device
* @fb: framebuffer to set up
* @mode_cmd: mode description
- * @gt: backing object
+ * @obj: backing object
*
* Configure and fill in the boilerplate for our frame buffer. Return
* 0 on success or an error code if we fail.
@@ -197,7 +197,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
* psb_framebuffer_create - create a framebuffer backed by gt
* @dev: our DRM device
* @mode_cmd: the description of the requested mode
- * @gt: the backing object
+ * @obj: the backing object
*
* Create a framebuffer object backed by the gt, and fill in the
* boilerplate required
@@ -252,7 +252,7 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
/**
* psbfb_create - create a framebuffer
- * @fbdev: the framebuffer device
+ * @fb_helper: the framebuffer helper
* @sizes: specification of the layout
*
* Create a framebuffer to the specifications provided
@@ -262,6 +262,7 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
{
struct drm_device *dev = fb_helper->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
@@ -325,8 +326,8 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
drm_fb_helper_fill_info(info, fb_helper, sizes);
- info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
- info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
+ info->fix.mmio_start = pci_resource_start(pdev, 0);
+ info->fix.mmio_len = pci_resource_len(pdev, 0);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -529,6 +530,7 @@ void psb_modeset_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int i;
drm_mode_config_init(dev);
@@ -540,8 +542,7 @@ void psb_modeset_init(struct drm_device *dev)
/* set memory base */
/* Oaktrail and Poulsbo should use BAR 2*/
- pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
- &(dev->mode_config.fb_base));
+ pci_read_config_dword(pdev, PSB_BSM, (u32 *)&(dev->mode_config.fb_base));
/* num pipes is 2 for PSB but 1 for Mrst */
for (i = 0; i < dev_priv->num_pipe; i++)
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index db827e591403..fbf420051ef5 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -16,6 +16,7 @@
#include <drm/drm.h>
#include <drm/drm_vma_manager.h>
+#include "gem.h"
#include "psb_drv.h"
static vm_fault_t psb_gem_fault(struct vm_fault *vmf);
@@ -49,6 +50,8 @@ const struct drm_gem_object_funcs psb_gem_object_funcs = {
* @dev: our device
* @size: the size requested
* @handlep: returned handle (opaque number)
+ * @stolen: unused
+ * @align: unused
*
* Create a GEM object, fill in the boilerplate and attach a handle to
* it so that userspace can speak about it. This does the core work
@@ -97,7 +100,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
/**
* psb_gem_dumb_create - create a dumb buffer
- * @drm_file: our client file
+ * @file: our client file
* @dev: our device
* @args: the requested arguments copied from userspace
*
@@ -116,7 +119,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
/**
* psb_gem_fault - pagefault handler for GEM objects
- * @vma: the VMA of the GEM object
* @vmf: fault detail
*
* Invoked when a fault occurs on an mmap of a GEM managed area. GEM
diff --git a/drivers/gpu/drm/gma500/gem.h b/drivers/gpu/drm/gma500/gem.h
index 3741a711b9fd..bae6454ead29 100644
--- a/drivers/gpu/drm/gma500/gem.h
+++ b/drivers/gpu/drm/gma500/gem.h
@@ -8,6 +8,8 @@
#ifndef _GEM_H
#define _GEM_H
+struct drm_device;
+
extern const struct drm_gem_object_funcs psb_gem_object_funcs;
extern int psb_gem_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c
index 869f30392566..4c91e86f4b14 100644
--- a/drivers/gpu/drm/gma500/gma_device.c
+++ b/drivers/gpu/drm/gma500/gma_device.c
@@ -6,12 +6,14 @@
**************************************************************************/
#include "psb_drv.h"
+#include "gma_device.h"
void gma_get_core_freq(struct drm_device *dev)
{
uint32_t clock;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct pci_dev *pci_root =
- pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+ pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
0, 0);
struct drm_psb_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 3df6d6e850f5..b03f7b8241f2 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -20,7 +20,7 @@
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-/**
+/*
* Returns whether any output on the specified pipe is of the specified type
*/
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
@@ -180,7 +180,7 @@ int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
return 0;
}
-/**
+/*
* Sets the power management mode of the pipe and plane.
*
* This code should probably grow support for turning the cursor off and back
@@ -559,14 +559,14 @@ int gma_crtc_set_config(struct drm_mode_set *set,
if (!dev_priv->rpm_enabled)
return drm_crtc_helper_set_config(set, ctx);
- pm_runtime_forbid(&dev->pdev->dev);
+ pm_runtime_forbid(dev->dev);
ret = drm_crtc_helper_set_config(set, ctx);
- pm_runtime_allow(&dev->pdev->dev);
+ pm_runtime_allow(dev->dev);
return ret;
}
-/**
+/*
* Save HW states of given crtc
*/
void gma_crtc_save(struct drm_crtc *crtc)
@@ -609,7 +609,7 @@ void gma_crtc_save(struct drm_crtc *crtc)
crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}
-/**
+/*
* Restore HW states of given crtc
*/
void gma_crtc_restore(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index d246b1f70366..e884750bc123 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -340,13 +340,14 @@ static void psb_gtt_alloc(struct drm_device *dev)
void psb_gtt_takedown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
if (dev_priv->gtt_map) {
iounmap(dev_priv->gtt_map);
dev_priv->gtt_map = NULL;
}
if (dev_priv->gtt_initialized) {
- pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
dev_priv->gmch_ctrl);
PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
(void) PSB_RVDC32(PSB_PGETBL_CTL);
@@ -358,6 +359,7 @@ void psb_gtt_takedown(struct drm_device *dev)
int psb_gtt_init(struct drm_device *dev, int resume)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned gtt_pages;
unsigned long stolen_size, vram_stolen_size;
unsigned i, num_pages;
@@ -376,8 +378,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
pg = &dev_priv->gtt;
/* Enable the GTT */
- pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
- pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+ pci_read_config_word(pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
@@ -397,8 +399,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
*/
pg->mmu_gatt_start = 0xE0000000;
- pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
- gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
+ pg->gtt_start = pci_resource_start(pdev, PSB_GTT_RESOURCE);
+ gtt_pages = pci_resource_len(pdev, PSB_GTT_RESOURCE)
>> PAGE_SHIFT;
/* CDV doesn't report this. In which case the system has 64 gtt pages */
if (pg->gtt_start == 0 || gtt_pages == 0) {
@@ -407,10 +409,10 @@ int psb_gtt_init(struct drm_device *dev, int resume)
pg->gtt_start = dev_priv->pge_ctl;
}
- pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
- pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+ pg->gatt_start = pci_resource_start(pdev, PSB_GATT_RESOURCE);
+ pg->gatt_pages = pci_resource_len(pdev, PSB_GATT_RESOURCE)
>> PAGE_SHIFT;
- dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
+ dev_priv->gtt_mem = &pdev->resource[PSB_GATT_RESOURCE];
if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
static struct resource fudge; /* Preferably peppermint */
@@ -431,7 +433,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
dev_priv->gtt_mem = &fudge;
}
- pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
+ pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
- PAGE_SIZE;
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 8ad6337eeba3..d838369f0119 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -50,7 +50,7 @@ parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
uint8_t panel_type;
edp = find_section(bdb, BDB_EDP);
-
+
dev_priv->edp.bpp = 18;
if (!edp) {
if (dev_priv->edp.support) {
@@ -80,7 +80,7 @@ parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
dev_priv->edp.pps = *edp_pps;
DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8,
+ dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8,
dev_priv->edp.pps.t9, dev_priv->edp.pps.t10,
dev_priv->edp.pps.t11_t12);
@@ -516,7 +516,7 @@ parse_device_mapping(struct drm_psb_private *dev_priv,
int psb_intel_init_bios(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct vbt_header *vbt = NULL;
struct bdb_header *bdb = NULL;
u8 __iomem *bios = NULL;
@@ -574,7 +574,7 @@ int psb_intel_init_bios(struct drm_device *dev)
return 0;
}
-/**
+/*
* Destroy and free VBT data
*/
void psb_intel_destroy_bios(struct drm_device *dev)
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index a083fbfe35b8..370bd6451bd9 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -196,7 +196,7 @@ intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
"gma500 GPIO%c", "?BACDE?F"[pin]);
gpio->adapter.owner = THIS_MODULE;
gpio->adapter.algo_data = &gpio->algo;
- gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+ gpio->adapter.dev.parent = dev_priv->dev->dev;
gpio->algo.setsda = set_data;
gpio->algo.setscl = set_clock;
gpio->algo.getsda = get_data;
@@ -417,7 +417,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
"gma500 gmbus %s",
names[i]);
- bus->adapter.dev.parent = &dev->pdev->dev;
+ bus->adapter.dev.parent = dev->dev;
bus->adapter.algo_data = dev_priv;
bus->adapter.algo = &gmbus_algorithm;
diff --git a/drivers/gpu/drm/gma500/intel_i2c.c b/drivers/gpu/drm/gma500/intel_i2c.c
index de8810188190..5e1b4d70c317 100644
--- a/drivers/gpu/drm/gma500/intel_i2c.c
+++ b/drivers/gpu/drm/gma500/intel_i2c.c
@@ -85,7 +85,6 @@ static void set_data(void *data, int state_high)
/**
* psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
* @dev: DRM device
- * @output: driver specific output device
* @reg: GPIO reg to use
* @name: name for this bus
*
@@ -117,7 +116,7 @@ struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
chan->adapter.owner = THIS_MODULE;
chan->adapter.algo_data = &chan->algo;
- chan->adapter.dev.parent = &dev->pdev->dev;
+ chan->adapter.dev.parent = dev->dev;
chan->algo.setsda = set_data;
chan->algo.setscl = set_clock;
chan->algo.getsda = get_data;
@@ -145,7 +144,7 @@ out_free:
/**
* psb_intel_i2c_destroy - unregister and free i2c bus resources
- * @output: channel to free
+ * @chan: channel to free
*
* Unregister the adapter from the i2c layer, then free the structure.
*/
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
index b83d59b21de5..684d6cf9856f 100644
--- a/drivers/gpu/drm/gma500/mdfld_device.c
+++ b/drivers/gpu/drm/gma500/mdfld_device.c
@@ -523,7 +523,9 @@ static struct gpiod_lookup_table mdfld_dsi_pipe_gpio_table = {
static int mdfld_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- if (pci_enable_msi(dev->pdev))
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ if (pci_enable_msi(pdev))
dev_warn(dev->dev, "Enabling MSI failed!\n");
dev_priv->regmap = mdfld_regmap;
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index ae1223f631a7..4c5a2f7348c5 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -138,7 +138,7 @@ static void dsi_set_pipe_plane_enable_state(struct drm_device *dev,
REG_WRITE(pipeconf_reg, BIT(31));
if (REG_BIT_WAIT(pipeconf_reg, 1, 30))
- dev_err(&dev->pdev->dev, "%s: Pipe enable timeout\n",
+ dev_err(dev->dev, "%s: Pipe enable timeout\n",
__func__);
/*Set up display plane */
@@ -165,11 +165,11 @@ static void dsi_set_pipe_plane_enable_state(struct drm_device *dev,
REG_FLD_MOD(pipeconf_reg, 0, 31, 31);
if (REG_BIT_WAIT(pipeconf_reg, 0, 30))
- dev_err(&dev->pdev->dev, "%s: Pipe disable timeout\n",
+ dev_err(dev->dev, "%s: Pipe disable timeout\n",
__func__);
if (REG_BIT_WAIT(MIPI_GEN_FIFO_STAT_REG(pipe), 1, 28))
- dev_err(&dev->pdev->dev, "%s: FIFO not empty\n",
+ dev_err(dev->dev, "%s: FIFO not empty\n",
__func__);
}
}
@@ -867,7 +867,7 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
REG_WRITE(MRST_DPLL_A, 0x80800000);
if (REG_BIT_WAIT(pipeconf_reg, 1, 29))
- dev_err(&dev->pdev->dev, "%s: DSI PLL lock timeout\n",
+ dev_err(dev->dev, "%s: DSI PLL lock timeout\n",
__func__);
REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 4aab76613bd9..24105f45c1c4 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -63,7 +63,7 @@ static int __init parse_LABC_control(char *arg)
early_param("LABC", parse_LABC_control);
#endif
-/**
+/*
* Check and see if the generic control or data buffer is empty and ready.
*/
void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, u32 gen_fifo_stat_reg,
@@ -85,7 +85,7 @@ void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, u32 gen_fifo_stat_reg,
gen_fifo_stat_reg);
}
-/**
+/*
* Manage the DSI MIPI keyboard and display brightness.
 * FIXME: this is exported to OSPM code. Should work out a specific
* display interface to OSPM.
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index aae2d358364c..462aba8f7528 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -95,7 +95,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
}
}
-/**
+/*
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
@@ -263,7 +263,7 @@ void mdfld_disable_crtc(struct drm_device *dev, int pipe)
}
-/**
+/*
* Sets the power management mode of the pipe and plane.
*
* This code should probably grow support for turning the cursor off and back
@@ -599,7 +599,7 @@ static void mdfld_clock(int refclk, struct mrst_clock_t *clock)
clock->dot = (refclk * clock->m) / clock->p1;
}
-/**
+/*
* Returns a set of divisors for the desired target clock with the given refclk,
* or FALSE. Divisor values are the actual divisors for
*/
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 8ab44fec4bfa..68e787924ed0 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -19,8 +19,9 @@
static void mid_get_fuse_settings(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct pci_dev *pci_root =
- pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+ pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
0, 0);
uint32_t fuse_value = 0;
uint32_t fuse_value_tmp = 0;
@@ -93,7 +94,8 @@ static void mid_get_fuse_settings(struct drm_device *dev)
static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
{
uint32_t platform_rev_id = 0;
- int domain = pci_domain_nr(dev_priv->dev->pdev->bus);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->dev->dev);
+ int domain = pci_domain_nr(pdev->bus);
struct pci_dev *pci_gfx_root =
pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(2, 0));
@@ -269,11 +271,12 @@ out:
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 addr;
u8 __iomem *vbt_virtual;
struct mid_vbt_header vbt_header;
struct pci_dev *pci_gfx_root =
- pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+ pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
0, PCI_DEVFN(2, 0));
int ret = -1;
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 505044c9a673..13aff19aae9b 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -313,8 +313,8 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
return pt;
}
-struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
- unsigned long addr)
+static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+ unsigned long addr)
{
uint32_t index = psb_mmu_pd_index(addr);
struct psb_mmu_pt *pt;
@@ -416,15 +416,6 @@ struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
return pd;
}
-/* Returns the physical address of the PD shared by sgx/msvdx */
-uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
-{
- struct psb_mmu_pd *pd;
-
- pd = psb_mmu_get_default_pd(driver);
- return page_to_pfn(pd->p) << PAGE_SHIFT;
-}
-
void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
struct drm_device *dev = driver->dev;
@@ -690,7 +681,7 @@ out:
if (pd->hw_context != -1)
psb_mmu_flush(pd->driver);
- return 0;
+ return ret;
}
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 900e5499249d..129f87971002 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -174,7 +174,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
return min_error == 0;
}
-/**
+/*
* Returns a set of divisors for the desired target clock with the given refclk,
* or FALSE. Divisor values are the actual divisors for
*/
@@ -205,7 +205,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
return err != target;
}
-/**
+/*
* Sets the power management mode of the pipe and plane.
*
* This code should probably grow support for turning the cursor off and back
@@ -337,7 +337,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
gma_power_end(dev);
}
-/**
+/*
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 8754290b0e23..08cd5f73c868 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -504,9 +504,10 @@ static const struct psb_offset oaktrail_regmap[2] = {
static int oaktrail_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
-
- if (pci_enable_msi(dev->pdev))
+
+ if (pci_enable_msi(pdev))
dev_warn(dev->dev, "Enabling MSI failed!\n");
dev_priv->regmap = oaktrail_regmap;
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index e28107061148..fc9a34ed58bd 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -279,11 +279,8 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
hdmi_dev = pci_get_drvdata(dev);
i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
- if (i2c_dev == NULL) {
- DRM_ERROR("Can't allocate interface\n");
- ret = -ENOMEM;
- goto exit;
- }
+ if (!i2c_dev)
+ return -ENOMEM;
i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
i2c_dev->status = I2C_STAT_INIT;
@@ -300,16 +297,23 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
if (ret) {
DRM_ERROR("Failed to request IRQ for I2C controller\n");
- goto err;
+ goto free_dev;
}
/* Adapter registration */
ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
- return ret;
+ if (ret) {
+ DRM_ERROR("Failed to add I2C adapter\n");
+ goto free_irq;
+ }
-err:
+ return 0;
+
+free_irq:
+ free_irq(dev->irq, hdmi_dev);
+free_dev:
kfree(i2c_dev);
-exit:
+
return ret;
}
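[Editor's note, not part of the patch] The oaktrail_hdmi_i2c_init() rework above replaces the single catch-all label with a conventional unwind ladder: each failure jumps to the label that releases only what was already acquired, in reverse order. A generic sketch of the pattern with hypothetical names (example_ctx, example_isr, example_register are illustrative, not from the driver); assumed headers: <linux/interrupt.h>, <linux/pci.h>, <linux/slab.h>:

	struct example_ctx { int dummy; };

	static irqreturn_t example_isr(int irq, void *data) { return IRQ_HANDLED; }
	static int example_register(struct example_ctx *ctx) { return 0; }

	static int example_init(struct pci_dev *dev)
	{
		struct example_ctx *ctx;
		int ret;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		ret = request_irq(dev->irq, example_isr, IRQF_SHARED, "example", ctx);
		if (ret)
			goto free_ctx;

		ret = example_register(ctx);
		if (ret)
			goto free_irq;

		return 0;

	free_irq:
		free_irq(dev->irq, ctx);	/* undo request_irq() */
	free_ctx:
		kfree(ctx);			/* undo kzalloc() */
		return ret;
	}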
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 2828360153d1..432bdcc57ac9 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -29,7 +29,7 @@
#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
#define BRIGHTNESS_MAX_LEVEL 100
-/**
+/*
* Sets the power state for the panel.
*/
static void oaktrail_lvds_set_power(struct drm_device *dev,
@@ -60,7 +60,7 @@ static void oaktrail_lvds_set_power(struct drm_device *dev,
pp_status = REG_READ(PP_STATUS);
} while (pp_status & PP_ON);
dev_priv->is_lvds_on = false;
- pm_request_idle(&dev->pdev->dev);
+ pm_request_idle(dev->dev);
}
gma_power_end(dev);
}
@@ -282,6 +282,7 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
/**
* oaktrail_lvds_init - setup LVDS connectors on this device
* @dev: drm device
+ * @mode_dev: PSB mode device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
index baaf8212e01d..1d2dd6ea1c71 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
@@ -66,12 +66,12 @@
static int get_clock(void *data)
{
struct psb_intel_i2c_chan *chan = data;
- u32 val, tmp;
+ u32 val;
val = LPC_READ_REG(chan, RGIO);
val |= GPIO_CLOCK;
LPC_WRITE_REG(chan, RGIO, val);
- tmp = LPC_READ_REG(chan, RGLVL);
+ LPC_READ_REG(chan, RGLVL);
val = (LPC_READ_REG(chan, RGLVL) & GPIO_CLOCK) ? 1 : 0;
return val;
@@ -80,12 +80,12 @@ static int get_clock(void *data)
static int get_data(void *data)
{
struct psb_intel_i2c_chan *chan = data;
- u32 val, tmp;
+ u32 val;
val = LPC_READ_REG(chan, RGIO);
val |= GPIO_DATA;
LPC_WRITE_REG(chan, RGIO, val);
- tmp = LPC_READ_REG(chan, RGLVL);
+ LPC_READ_REG(chan, RGLVL);
val = (LPC_READ_REG(chan, RGLVL) & GPIO_DATA) ? 1 : 0;
return val;
@@ -145,7 +145,7 @@ void oaktrail_lvds_i2c_init(struct drm_encoder *encoder)
strncpy(chan->adapter.name, "gma500 LPC", I2C_NAME_SIZE - 1);
chan->adapter.owner = THIS_MODULE;
chan->adapter.algo_data = &chan->algo;
- chan->adapter.dev.parent = &dev->pdev->dev;
+ chan->adapter.dev.parent = dev->dev;
chan->algo.setsda = set_data;
chan->algo.setscl = set_clock;
chan->algo.getsda = get_data;
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index eab6d889bde9..a1ffc6a1c255 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -305,12 +305,13 @@ void psb_intel_opregion_fini(struct drm_device *dev)
int psb_intel_opregion_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_intel_opregion *opregion = &dev_priv->opregion;
u32 opregion_phy, mboxes;
void __iomem *base;
int err = 0;
- pci_read_config_dword(dev->pdev, PCI_ASLS, &opregion_phy);
+ pci_read_config_dword(pdev, PCI_ASLS, &opregion_phy);
if (opregion_phy == 0) {
DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
return -ENOTSUPP;
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index bea8578846d1..56ef88237ef6 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -70,8 +70,8 @@ void gma_power_init(struct drm_device *dev)
*/
void gma_power_uninit(struct drm_device *dev)
{
- pm_runtime_disable(&dev->pdev->dev);
- pm_runtime_set_suspended(&dev->pdev->dev);
+ pm_runtime_disable(dev->dev);
+ pm_runtime_set_suspended(dev->dev);
}
/**
@@ -93,6 +93,7 @@ static void gma_suspend_display(struct drm_device *dev)
/**
* gma_resume_display - resume display side logic
+ * @pdev: PCI device
*
* Resume the display hardware restoring state and enabling
* as necessary.
@@ -146,7 +147,7 @@ static void gma_suspend_pci(struct pci_dev *pdev)
/**
* gma_resume_pci - resume helper
- * @dev: our PCI device
+ * @pdev: our PCI device
*
* Perform the resume processing on our PCI device state - rewrite
* register state and re-enable the PCI device
@@ -178,8 +179,7 @@ static bool gma_resume_pci(struct pci_dev *pdev)
/**
* gma_power_suspend - bus callback for suspend
- * @pdev: our PCI device
- * @state: suspend type
+ * @_dev: our device
*
* Called back by the PCI layer during a suspend of the system. We
* perform the necessary shut down steps and save enough state that
@@ -208,7 +208,7 @@ int gma_power_suspend(struct device *_dev)
/**
* gma_power_resume - resume power
- * @pdev: PCI device
+ * @_dev: our device
*
* Resume the PCI side of the graphics and then the displays
*/
@@ -249,6 +249,7 @@ bool gma_power_is_on(struct drm_device *dev)
bool gma_power_begin(struct drm_device *dev, bool force_on)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
unsigned long flags;
@@ -256,7 +257,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
/* Power already on ? */
if (dev_priv->display_power) {
dev_priv->display_count++;
- pm_runtime_get(&dev->pdev->dev);
+ pm_runtime_get(dev->dev);
spin_unlock_irqrestore(&power_ctrl_lock, flags);
return true;
}
@@ -264,11 +265,11 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
goto out_false;
/* Ok power up needed */
- ret = gma_resume_pci(dev->pdev);
+ ret = gma_resume_pci(pdev);
if (ret == 0) {
psb_irq_preinstall(dev);
psb_irq_postinstall(dev);
- pm_runtime_get(&dev->pdev->dev);
+ pm_runtime_get(dev->dev);
dev_priv->display_count++;
spin_unlock_irqrestore(&power_ctrl_lock, flags);
return true;
@@ -293,7 +294,7 @@ void gma_power_end(struct drm_device *dev)
dev_priv->display_count--;
WARN_ON(dev_priv->display_count < 0);
spin_unlock_irqrestore(&power_ctrl_lock, flags);
- pm_runtime_put(&dev->pdev->dev);
+ pm_runtime_put(dev->dev);
}
int psb_runtime_suspend(struct device *dev)
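[Editor's note, not part of the patch] In power.c the pm_runtime_*() calls move from &dev->pdev->dev to dev->dev directly: runtime PM, like dev_err()/dev_warn() and the i2c adapter parents elsewhere in this series, only needs the generic struct device that drm_device already carries, so no PCI lookup is required there. A hedged sketch of the distinction; assumed headers: <linux/pci.h>, <linux/pm_runtime.h>, <drm/drm_device.h>:

	/* Illustrative: struct-device consumers take dev->dev as-is, while
	 * PCI-specific helpers still need the pci_dev via to_pci_dev().
	 */
	static void example_power_cycle(struct drm_device *dev)
	{
		struct pci_dev *pdev = to_pci_dev(dev->dev);

		pm_runtime_get(dev->dev);	/* generic struct device is enough */
		pci_set_master(pdev);		/* PCI-level operation needs pci_dev */
		pm_runtime_put(dev->dev);
	}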
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index cc2d59e8471d..a19f60d40394 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -208,6 +208,7 @@ static void psb_driver_unload(struct drm_device *dev)
static int psb_driver_load(struct drm_device *dev, unsigned long flags)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_psb_private *dev_priv;
unsigned long resource_start, resource_len;
unsigned long irqflags;
@@ -227,11 +228,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
pg = &dev_priv->gtt;
- pci_set_master(dev->pdev);
+ pci_set_master(pdev);
dev_priv->num_pipe = dev_priv->ops->pipes;
- resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+ resource_start = pci_resource_start(pdev, PSB_MMIO_RESOURCE);
dev_priv->vdc_reg =
ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
@@ -244,7 +245,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
goto out_err;
if (IS_MRST(dev)) {
- int domain = pci_domain_nr(dev->pdev->bus);
+ int domain = pci_domain_nr(pdev->bus);
dev_priv->aux_pdev =
pci_get_domain_bus_and_slot(domain, 0,
@@ -312,6 +313,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_err;
+ ret = -ENOMEM;
+
dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
if (!dev_priv->mmu)
goto out_err;
@@ -359,7 +362,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
- drm_irq_install(dev, dev->pdev->irq);
+ drm_irq_install(dev, pdev->irq);
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@@ -385,8 +388,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
psb_intel_opregion_enable_asle(dev);
#if 0
/* Enable runtime pm at last */
- pm_runtime_enable(&dev->pdev->dev);
- pm_runtime_set_active(&dev->pdev->dev);
+ pm_runtime_enable(dev->dev);
+ pm_runtime_set_active(dev->dev);
#endif
/* Intel drm driver load is done, continue doing pvr load */
return 0;
@@ -415,7 +418,7 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
runtime_allowed++;
- pm_runtime_allow(&dev->pdev->dev);
+ pm_runtime_allow(dev->dev);
dev_priv->rpm_enabled = 1;
}
return drm_ioctl(filp, cmd, arg);
@@ -437,7 +440,6 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_disable_device;
}
- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
ret = psb_driver_load(dev, ent->driver_data);
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 5b7f7a312d53..d303f8271f7e 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -43,10 +43,10 @@ enum {
CHIP_MFLD_0130 = 3, /* Medfield */
};
-#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
-#define IS_MRST(dev) (((dev)->pdev->device & 0xfff0) == 0x4100)
-#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
-#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
+#define IS_PSB(drm) ((to_pci_dev((drm)->dev)->device & 0xfffe) == 0x8108)
+#define IS_MRST(drm) ((to_pci_dev((drm)->dev)->device & 0xfff0) == 0x4100)
+#define IS_MFLD(drm) ((to_pci_dev((drm)->dev)->device & 0xfff8) == 0x0130)
+#define IS_CDV(drm) ((to_pci_dev((drm)->dev)->device & 0xfff0) == 0x0be0)
/* Hardware offsets */
#define PSB_VDC_OFFSET 0x00000000
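[Editor's note, not part of the patch] The reworked IS_PSB()/IS_MRST()/IS_MFLD()/IS_CDV() macros mask the PCI device ID reached through to_pci_dev((drm)->dev), so callers holding only a drm_device keep working. A small illustrative caller (function name and return strings are assumptions for the sketch):

	static const char *example_chip_name(struct drm_device *dev)
	{
		if (IS_PSB(dev))
			return "Poulsbo";
		if (IS_MRST(dev))
			return "Moorestown/Oaktrail";
		if (IS_MFLD(dev))
			return "Medfield";
		if (IS_CDV(dev))
			return "Cedarview";
		return "unknown";
	}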
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 531c5485be17..9c3cb1b80bbd 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -71,7 +71,7 @@ static void psb_intel_clock(int refclk, struct gma_clock_t *clock)
clock->dot = clock->vco / clock->p;
}
-/**
+/*
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 063c66bb946d..ace95d4bdb6f 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -216,7 +216,7 @@ static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
dev_err(dev->dev, "set power, chip off!\n");
return;
}
-
+
if (on) {
REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
POWER_TARGET_ON);
@@ -626,6 +626,7 @@ const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
/**
* psb_intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
+ * @mode_dev: mode device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
@@ -700,7 +701,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
lvds_priv->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
if (!lvds_priv->i2c_bus) {
dev_printk(KERN_ERR,
- &dev->pdev->dev, "I2C bus registration failed.\n");
+ dev->dev, "I2C bus registration failed.\n");
goto failed_blc_i2c;
}
lvds_priv->i2c_bus->slave_addr = 0x2C;
@@ -719,7 +720,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
/* Set up the DDC bus. */
lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
if (!lvds_priv->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev,
+ dev_printk(KERN_ERR, dev->dev,
"DDC bus registration " "failed.\n");
goto failed_ddc;
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index 88653a40aeb5..60306780e16c 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -11,7 +11,7 @@
/**
* psb_intel_ddc_probe
- *
+ * @adapter: Associated I2C adaptor
*/
bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
{
@@ -43,6 +43,7 @@ bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
/**
* psb_intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
+ * @adapter: Associated I2C adaptor
*
* Fetch the EDID information from @connector using the DDC bus.
*/
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 907f966d6f22..355da2856389 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -221,7 +221,7 @@ static bool
psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector);
-/**
+/*
* Writes the SDVOB or SDVOC with the given value, but always writes both
* SDVOB and SDVOC to work around apparent hardware issues (according to
* comments in the BIOS).
@@ -588,7 +588,7 @@ static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdv
&targets, sizeof(targets));
}
-/**
+/*
* Return whether each input is trained.
*
* This function is making an assumption about the layout of the response,
@@ -1818,7 +1818,7 @@ psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo)
#endif
}
-/**
+/*
* Choose the appropriate DDC bus for control bus switch command for this
* SDVO output based on the controlled output.
*
@@ -2406,7 +2406,7 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
sdvo->ddc.owner = THIS_MODULE;
sdvo->ddc.class = I2C_CLASS_DDC;
snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
- sdvo->ddc.dev.parent = &dev->pdev->dev;
+ sdvo->ddc.dev.parent = dev->dev;
sdvo->ddc.algo_data = sdvo;
sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy;
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 361e3a0c5ab6..5cceea9be534 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -126,9 +126,8 @@ static void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
}
}
-/**
+/*
* Display controller interrupt handler for pipe event.
- *
*/
static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
{
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
index e5bdd99ad453..99d2ffc2fed9 100644
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -454,13 +454,13 @@ static void tc35876x_brightness_init(struct drm_device *dev)
ret = intel_scu_ipc_ioread8(GPIOPWMCTRL, &pwmctrl);
if (ret || pwmctrl != 0x01) {
if (ret)
- dev_err(&dev->pdev->dev, "GPIOPWMCTRL read failed\n");
+ dev_err(dev->dev, "GPIOPWMCTRL read failed\n");
else
- dev_warn(&dev->pdev->dev, "GPIOPWMCTRL was not set to system clock (pwmctrl = 0x%02x)\n", pwmctrl);
+ dev_warn(dev->dev, "GPIOPWMCTRL was not set to system clock (pwmctrl = 0x%02x)\n", pwmctrl);
ret = intel_scu_ipc_iowrite8(GPIOPWMCTRL, 0x01);
if (ret)
- dev_err(&dev->pdev->dev, "GPIOPWMCTRL set failed\n");
+ dev_err(dev->dev, "GPIOPWMCTRL set failed\n");
}
clkdiv = calc_clkdiv(SYSTEMCLK, PWM_FREQUENCY);
@@ -470,9 +470,9 @@ static void tc35876x_brightness_init(struct drm_device *dev)
ret = intel_scu_ipc_iowrite8(PWM0CLKDIV0, clkdiv & 0xff);
if (ret)
- dev_err(&dev->pdev->dev, "PWM0CLKDIV set failed\n");
+ dev_err(dev->dev, "PWM0CLKDIV set failed\n");
else
- dev_dbg(&dev->pdev->dev, "PWM0CLKDIV set to 0x%04x (%d Hz)\n",
+ dev_dbg(dev->dev, "PWM0CLKDIV set to 0x%04x (%d Hz)\n",
clkdiv, PWM_FREQUENCY);
}
@@ -575,7 +575,7 @@ static struct drm_display_mode *tc35876x_get_config_mode(struct drm_device *dev)
{
struct drm_display_mode *mode;
- dev_dbg(&dev->pdev->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
@@ -592,15 +592,15 @@ static struct drm_display_mode *tc35876x_get_config_mode(struct drm_device *dev)
mode->vtotal = 838;
mode->clock = 33324 << 1;
- dev_info(&dev->pdev->dev, "hdisplay(w) = %d\n", mode->hdisplay);
- dev_info(&dev->pdev->dev, "vdisplay(h) = %d\n", mode->vdisplay);
- dev_info(&dev->pdev->dev, "HSS = %d\n", mode->hsync_start);
- dev_info(&dev->pdev->dev, "HSE = %d\n", mode->hsync_end);
- dev_info(&dev->pdev->dev, "htotal = %d\n", mode->htotal);
- dev_info(&dev->pdev->dev, "VSS = %d\n", mode->vsync_start);
- dev_info(&dev->pdev->dev, "VSE = %d\n", mode->vsync_end);
- dev_info(&dev->pdev->dev, "vtotal = %d\n", mode->vtotal);
- dev_info(&dev->pdev->dev, "clock = %d\n", mode->clock);
+ dev_info(dev->dev, "hdisplay(w) = %d\n", mode->hdisplay);
+ dev_info(dev->dev, "vdisplay(h) = %d\n", mode->vdisplay);
+ dev_info(dev->dev, "HSS = %d\n", mode->hsync_start);
+ dev_info(dev->dev, "HSE = %d\n", mode->hsync_end);
+ dev_info(dev->dev, "htotal = %d\n", mode->htotal);
+ dev_info(dev->dev, "VSS = %d\n", mode->vsync_start);
+ dev_info(dev->dev, "VSE = %d\n", mode->vsync_end);
+ dev_info(dev->dev, "vtotal = %d\n", mode->vtotal);
+ dev_info(dev->dev, "clock = %d\n", mode->clock);
drm_mode_set_name(mode);
drm_mode_set_crtcinfo(mode, 0);
@@ -775,19 +775,19 @@ void tc35876x_init(struct drm_device *dev)
{
int r;
- dev_dbg(&dev->pdev->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
cmi_lcd_hack_create_device();
r = i2c_add_driver(&cmi_lcd_i2c_driver);
if (r < 0)
- dev_err(&dev->pdev->dev,
+ dev_err(dev->dev,
"%s: i2c_add_driver() for %s failed (%d)\n",
__func__, cmi_lcd_i2c_driver.driver.name, r);
r = i2c_add_driver(&tc35876x_bridge_i2c_driver);
if (r < 0)
- dev_err(&dev->pdev->dev,
+ dev_err(dev->dev,
"%s: i2c_add_driver() for %s failed (%d)\n",
__func__, tc35876x_bridge_i2c_driver.driver.name, r);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index 684ef794eb7c..d25c75e60d3d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o hibmc_drm_i2c.o
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index ea962acfeae0..096eea985b6f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -499,7 +499,7 @@ static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
int hibmc_de_init(struct hibmc_drm_private *priv)
{
- struct drm_device *dev = priv->dev;
+ struct drm_device *dev = &priv->dev;
struct drm_crtc *crtc = &priv->crtc;
struct drm_plane *plane = &priv->primary_plane;
int ret;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index d845657fd99c..abd6250d5a14 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -16,6 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_managed.h>
@@ -43,6 +44,12 @@ static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
+static int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
+}
+
static const struct drm_driver hibmc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hibmc_fops,
@@ -77,47 +84,48 @@ static const struct dev_pm_ops hibmc_pm_ops = {
hibmc_pm_resume)
};
+static const struct drm_mode_config_funcs hibmc_mode_funcs = {
+ .mode_valid = drm_vram_helper_mode_valid,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = drm_gem_fb_create,
+};
+
static int hibmc_kms_init(struct hibmc_drm_private *priv)
{
+ struct drm_device *dev = &priv->dev;
int ret;
- drm_mode_config_init(priv->dev);
- priv->mode_config_initialized = true;
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
- priv->dev->mode_config.min_width = 0;
- priv->dev->mode_config.min_height = 0;
- priv->dev->mode_config.max_width = 1920;
- priv->dev->mode_config.max_height = 1200;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 1920;
+ dev->mode_config.max_height = 1200;
- priv->dev->mode_config.fb_base = priv->fb_base;
- priv->dev->mode_config.preferred_depth = 32;
- priv->dev->mode_config.prefer_shadow = 1;
+ dev->mode_config.fb_base = priv->fb_base;
+ dev->mode_config.preferred_depth = 32;
+ dev->mode_config.prefer_shadow = 1;
- priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
+ dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
ret = hibmc_de_init(priv);
if (ret) {
- drm_err(priv->dev, "failed to init de: %d\n", ret);
+ drm_err(dev, "failed to init de: %d\n", ret);
return ret;
}
ret = hibmc_vdac_init(priv);
if (ret) {
- drm_err(priv->dev, "failed to init vdac: %d\n", ret);
+ drm_err(dev, "failed to init vdac: %d\n", ret);
return ret;
}
return 0;
}
-static void hibmc_kms_fini(struct hibmc_drm_private *priv)
-{
- if (priv->mode_config_initialized) {
- drm_mode_config_cleanup(priv->dev);
- priv->mode_config_initialized = false;
- }
-}
-
/*
* It can operate in one of three modes: 0, 1 or Sleep.
*/
@@ -202,8 +210,8 @@ static void hibmc_hw_config(struct hibmc_drm_private *priv)
static int hibmc_hw_map(struct hibmc_drm_private *priv)
{
- struct drm_device *dev = priv->dev;
- struct pci_dev *pdev = dev->pdev;
+ struct drm_device *dev = &priv->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
resource_size_t addr, size, ioaddr, iosize;
ioaddr = pci_resource_start(pdev, 1);
@@ -242,40 +250,31 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv)
static int hibmc_unload(struct drm_device *dev)
{
- struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
-
drm_atomic_helper_shutdown(dev);
if (dev->irq_enabled)
drm_irq_uninstall(dev);
- pci_disable_msi(dev->pdev);
- hibmc_kms_fini(priv);
- hibmc_mm_fini(priv);
- dev->dev_private = NULL;
+ pci_disable_msi(to_pci_dev(dev->dev));
+
return 0;
}
static int hibmc_load(struct drm_device *dev)
{
- struct hibmc_drm_private *priv;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
int ret;
- priv = drmm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- drm_err(dev, "no memory to allocate for hibmc_drm_private\n");
- return -ENOMEM;
- }
- dev->dev_private = priv;
- priv->dev = dev;
-
ret = hibmc_hw_init(priv);
if (ret)
goto err;
- ret = hibmc_mm_init(priv);
- if (ret)
+ ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), priv->fb_size);
+ if (ret) {
+ drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
goto err;
+ }
ret = hibmc_kms_init(priv);
if (ret)
@@ -287,11 +286,11 @@ static int hibmc_load(struct drm_device *dev)
goto err;
}
- ret = pci_enable_msi(dev->pdev);
+ ret = pci_enable_msi(pdev);
if (ret) {
drm_warn(dev, "enabling MSI failed: %d\n", ret);
} else {
- ret = drm_irq_install(dev, dev->pdev->irq);
+ ret = drm_irq_install(dev, pdev->irq);
if (ret)
drm_warn(dev, "install irq failed: %d\n", ret);
}
@@ -310,6 +309,7 @@ err:
static int hibmc_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ struct hibmc_drm_private *priv;
struct drm_device *dev;
int ret;
@@ -318,25 +318,26 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- dev = drm_dev_alloc(&hibmc_driver, &pdev->dev);
- if (IS_ERR(dev)) {
+ priv = devm_drm_dev_alloc(&pdev->dev, &hibmc_driver,
+ struct hibmc_drm_private, dev);
+ if (IS_ERR(priv)) {
DRM_ERROR("failed to allocate drm_device\n");
- return PTR_ERR(dev);
+ return PTR_ERR(priv);
}
- dev->pdev = pdev;
+ dev = &priv->dev;
pci_set_drvdata(pdev, dev);
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret) {
drm_err(dev, "failed to enable pci device: %d\n", ret);
- goto err_free;
+ goto err_return;
}
ret = hibmc_load(dev);
if (ret) {
drm_err(dev, "failed to load hibmc: %d\n", ret);
- goto err_disable;
+ goto err_return;
}
ret = drm_dev_register(dev, 0);
@@ -352,11 +353,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
err_unload:
hibmc_unload(dev);
-err_disable:
- pci_disable_device(pdev);
-err_free:
- drm_dev_put(dev);
-
+err_return:
return ret;
}
@@ -366,7 +363,6 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(dev);
hibmc_unload(dev);
- drm_dev_put(dev);
}
static const struct pci_device_id hibmc_pci_table[] = {
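[Editor's note, not part of the patch] The hibmc probe path now allocates the driver-private structure with devm_drm_dev_alloc(), which embeds struct drm_device inside struct hibmc_drm_private and ties its lifetime to the PCI device, so the manual drm_dev_put()/pci_disable_device() unwinding disappears. A condensed sketch of the shape this conversion takes (names reused from the patch, hardware setup and error handling trimmed); assumed headers: <linux/pci.h>, <drm/drm_drv.h>:

	/* Illustrative sketch of the devm_drm_dev_alloc() probe pattern used
	 * above; not a drop-in copy of the driver's probe function.
	 */
	static int example_pci_probe(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
	{
		struct hibmc_drm_private *priv;
		struct drm_device *dev;
		int ret;

		/* Allocates priv with drm_device embedded at priv->dev and
		 * registers a devres action that drops the last reference. */
		priv = devm_drm_dev_alloc(&pdev->dev, &hibmc_driver,
					  struct hibmc_drm_private, dev);
		if (IS_ERR(priv))
			return PTR_ERR(priv);

		dev = &priv->dev;
		pci_set_drvdata(pdev, dev);

		ret = pcim_enable_device(pdev);	/* managed: undone automatically */
		if (ret)
			return ret;

		return drm_dev_register(dev, 0);
	}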
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index f310a83d9c48..7d263f4d7078 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -37,12 +37,11 @@ struct hibmc_drm_private {
resource_size_t fb_size;
/* drm */
- struct drm_device *dev;
+ struct drm_device dev;
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct hibmc_connector connector;
- bool mode_config_initialized;
};
static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector)
@@ -52,7 +51,7 @@ static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *c
static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
{
- return dev->dev_private;
+ return container_of(dev, struct hibmc_drm_private, dev);
}
void hibmc_set_power_mode(struct hibmc_drm_private *priv,
@@ -64,11 +63,6 @@ int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
int hibmc_mm_init(struct hibmc_drm_private *hibmc);
-void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
-int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args);
int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
-extern const struct drm_mode_config_funcs hibmc_mode_funcs;
-
#endif
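[Editor's note, not part of the patch] With drm_device embedded rather than pointed to, to_hibmc_drm_private() becomes a pure container_of() upcast, so any DRM callback that receives the drm_device can reach the driver-private data without a dev_private pointer. A minimal sketch using a hypothetical callback; assumed header: <drm/drm_print.h>:

	static void example_callback(struct drm_device *dev)
	{
		/* container_of() recovers the embedding structure from the
		 * embedded drm_device; no dev_private indirection needed. */
		struct hibmc_drm_private *priv =
			container_of(dev, struct hibmc_drm_private, dev);

		drm_dbg(dev, "fb_base = %pa\n", &priv->fb_base);
	}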
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
index 86d712090d87..410bd019bb35 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
@@ -83,7 +83,7 @@ int hibmc_ddc_create(struct drm_device *drm_dev,
connector->adapter.owner = THIS_MODULE;
connector->adapter.class = I2C_CLASS_DDC;
snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
- connector->adapter.dev.parent = &drm_dev->pdev->dev;
+ connector->adapter.dev.parent = drm_dev->dev;
i2c_set_adapdata(&connector->adapter, connector);
connector->adapter.algo_data = &connector->bit_data;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 74e26c27d878..c228091fb0e6 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
+#include <drm/drm_simple_kms_helper.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
@@ -42,12 +43,6 @@ out:
return count;
}
-static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static void hibmc_connector_destroy(struct drm_connector *connector)
{
struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
@@ -59,7 +54,6 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs
hibmc_connector_helper_funcs = {
.get_modes = hibmc_connector_get_modes,
- .mode_valid = hibmc_connector_mode_valid,
};
static const struct drm_connector_funcs hibmc_connector_funcs = {
@@ -90,15 +84,12 @@ static const struct drm_encoder_helper_funcs hibmc_encoder_helper_funcs = {
.mode_set = hibmc_encoder_mode_set,
};
-static const struct drm_encoder_funcs hibmc_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
int hibmc_vdac_init(struct hibmc_drm_private *priv)
{
- struct drm_device *dev = priv->dev;
+ struct drm_device *dev = &priv->dev;
struct hibmc_connector *hibmc_connector = &priv->connector;
struct drm_encoder *encoder = &priv->encoder;
+ struct drm_crtc *crtc = &priv->crtc;
struct drm_connector *connector = &hibmc_connector->base;
int ret;
@@ -108,9 +99,8 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
return ret;
}
- encoder->possible_crtcs = 0x1;
- ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
if (ret) {
drm_err(dev, "failed to init encoder: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
deleted file mode 100644
index 602ece11bb4a..000000000000
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Hisilicon Hibmc SoC drm driver
- *
- * Based on the bochs drm driver.
- *
- * Copyright (c) 2016 Huawei Limited.
- *
- * Author:
- * Rongrong Zou <[email protected]>
- * Rongrong Zou <[email protected]>
- * Jianhua Li <[email protected]>
- */
-
-#include <linux/pci.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_print.h>
-
-#include "hibmc_drm_drv.h"
-
-int hibmc_mm_init(struct hibmc_drm_private *hibmc)
-{
- struct drm_vram_mm *vmm;
- int ret;
- struct drm_device *dev = hibmc->dev;
-
- vmm = drm_vram_helper_alloc_mm(dev,
- pci_resource_start(dev->pdev, 0),
- hibmc->fb_size);
- if (IS_ERR(vmm)) {
- ret = PTR_ERR(vmm);
- drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
-{
- if (!hibmc->dev->vram_mm)
- return;
-
- drm_vram_helper_release_mm(hibmc->dev);
-}
-
-int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
-}
-
-const struct drm_mode_config_funcs hibmc_mode_funcs = {
- .mode_valid = drm_vram_helper_mode_valid,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
- .fb_create = drm_gem_fb_create,
-};
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 25cd9788a4d5..be76054c01d8 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -31,10 +31,13 @@ config DRM_I915_DEBUG
select DRM_DEBUG_SELFTEST
select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
+ select DRM_I915_WERROR
+ select DRM_I915_DEBUG_GEM
+ select DRM_I915_DEBUG_GEM_ONCE
+ select DRM_I915_DEBUG_MMIO
+ select DRM_I915_DEBUG_RUNTIME_PM
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
- select DRM_I915_DEBUG_RUNTIME_PM
- select DRM_I915_DEBUG_MMIO
default n
help
Choose this option to turn on extra driver debugging that may affect
@@ -69,6 +72,21 @@ config DRM_I915_DEBUG_GEM
If in doubt, say "N".
+config DRM_I915_DEBUG_GEM_ONCE
+ bool "Make a GEM debug failure fatal"
+ default n
+ depends on DRM_I915_DEBUG_GEM
+ help
+ During development, we often only want the very first failure
+ as that would otherwise be lost in the deluge of subsequent
+ failures. However, more casual testers may not want to trigger
+ a hard BUG_ON and hope that the system remains sufficiently usable
+ to capture a bug report in situ.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_ERRLOG_GEM
bool "Insert extra logging (very verbose) for common GEM errors"
default n
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e5574e506a5c..32bd1fdffffe 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -38,6 +38,7 @@ i915-y += i915_drv.o \
i915_config.o \
i915_irq.o \
i915_getparam.o \
+ i915_mitigations.o \
i915_params.o \
i915_pci.o \
i915_scatterlist.o \
@@ -58,6 +59,7 @@ i915-y += i915_drv.o \
# core library code
i915-y += \
+ dma_resv_utils.o \
i915_memcpy.o \
i915_mm.o \
i915_sw_fence.o \
@@ -82,6 +84,7 @@ gt-y += \
gt/gen6_engine_cs.o \
gt/gen6_ppgtt.o \
gt/gen7_renderclear.o \
+ gt/gen8_engine_cs.o \
gt/gen8_ppgtt.o \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
@@ -91,6 +94,7 @@ gt-y += \
gt/intel_engine_heartbeat.o \
gt/intel_engine_pm.o \
gt/intel_engine_user.o \
+ gt/intel_execlists_submission.o \
gt/intel_ggtt.o \
gt/intel_ggtt_fencing.o \
gt/intel_gt.o \
@@ -106,6 +110,7 @@ gt-y += \
gt/intel_mocs.o \
gt/intel_ppgtt.o \
gt/intel_rc6.o \
+ gt/intel_region_lmem.o \
gt/intel_renderstate.o \
gt/intel_reset.o \
gt/intel_ring.o \
@@ -131,6 +136,7 @@ gem-y += \
gem/i915_gem_clflush.o \
gem/i915_gem_client_blt.o \
gem/i915_gem_context.o \
+ gem/i915_gem_create.o \
gem/i915_gem_dmabuf.o \
gem/i915_gem_domain.o \
gem/i915_gem_execbuffer.o \
@@ -166,7 +172,6 @@ i915-y += \
i915_scheduler.o \
i915_trace_points.o \
i915_vma.o \
- intel_region_lmem.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
@@ -196,13 +201,17 @@ i915-y += \
display/intel_color.o \
display/intel_combo_phy.o \
display/intel_connector.o \
+ display/intel_crtc.o \
display/intel_csr.o \
+ display/intel_cursor.o \
display/intel_display.o \
display/intel_display_power.o \
display/intel_dpio_phy.o \
+ display/intel_dpll.o \
display/intel_dpll_mgr.o \
display/intel_dsb.o \
display/intel_fbc.o \
+ display/intel_fdi.o \
display/intel_fifo_underrun.o \
display/intel_frontbuffer.o \
display/intel_global_state.o \
@@ -214,7 +223,8 @@ i915-y += \
display/intel_quirks.o \
display/intel_sprite.o \
display/intel_tc.o \
- display/intel_vga.o
+ display/intel_vga.o \
+ display/i9xx_plane.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -233,6 +243,7 @@ i915-y += \
display/intel_crt.o \
display/intel_ddi.o \
display/intel_dp.o \
+ display/intel_dp_aux.o \
display/intel_dp_aux_backlight.o \
display/intel_dp_hdcp.o \
display/intel_dp_link_training.o \
@@ -246,9 +257,11 @@ i915-y += \
display/intel_lspcon.o \
display/intel_lvds.o \
display/intel_panel.o \
+ display/intel_pps.o \
display/intel_sdvo.o \
display/intel_tv.o \
display/intel_vdsc.o \
+ display/intel_vrr.o \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
@@ -283,15 +296,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
# exclude some broken headers from the test coverage
no-header-test := \
- display/intel_vbt_defs.h \
- gvt/execlist.h \
- gvt/fb_decoder.h \
- gvt/gtt.h \
- gvt/gvt.h \
- gvt/interrupt.h \
- gvt/mmio_context.h \
- gvt/mpt.h \
- gvt/scheduler.h
+ display/intel_vbt_defs.h
extra-$(CONFIG_DRM_I915_WERROR) += \
$(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
new file mode 100644
index 000000000000..d30374df67f0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -0,0 +1,899 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_display_types.h"
+#include "intel_sprite.h"
+#include "i9xx_plane.h"
+
+/* Primary plane formats for gen <= 3 */
+static const u32 i8xx_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+/* Primary plane formats for ivb (no fp16 due to hw issue) */
+static const u32 ivb_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+};
+
+/* Primary plane formats for gen >= 4, except ivb */
+static const u32 i965_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+};
+
+/* Primary plane formats for vlv/chv */
+static const u32 vlv_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+};
+
+static const u64 i9xx_format_modifiers[] = {
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XRGB8888:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED;
+ default:
+ return false;
+ }
+}
+
+static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XBGR16161616F:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED;
+ default:
+ return false;
+ }
+}
+
+static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ if (!HAS_FBC(dev_priv))
+ return false;
+
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return i9xx_plane == PLANE_A; /* tied to pipe A */
+ else if (IS_IVYBRIDGE(dev_priv))
+ return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
+ i9xx_plane == PLANE_C;
+ else if (INTEL_GEN(dev_priv) >= 4)
+ return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
+ else
+ return i9xx_plane == PLANE_A;
+}
+
+static bool i9xx_plane_has_windowing(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ return i9xx_plane == PLANE_B;
+ else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ return false;
+ else if (IS_GEN(dev_priv, 4))
+ return i9xx_plane == PLANE_C;
+ else
+ return i9xx_plane == PLANE_B ||
+ i9xx_plane == PLANE_C;
+}
+
+static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ u32 dspcntr;
+
+ dspcntr = DISPLAY_PLANE_ENABLE;
+
+ if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
+ IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ dspcntr |= DISPPLANE_BGRX555;
+ break;
+ case DRM_FORMAT_ARGB1555:
+ dspcntr |= DISPPLANE_BGRA555;
+ break;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRA888;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBA888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
+ break;
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRA101010;
+ break;
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBA101010;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ dspcntr |= DISPPLANE_RGBX161616;
+ break;
+ default:
+ MISSING_CASE(fb->format->format);
+ return 0;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4 &&
+ fb->modifier == I915_FORMAT_MOD_X_TILED)
+ dspcntr |= DISPPLANE_TILED;
+
+ if (rotation & DRM_MODE_ROTATE_180)
+ dspcntr |= DISPPLANE_ROTATE_180;
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ dspcntr |= DISPPLANE_MIRROR;
+
+ return dspcntr;
+}
+
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_x, src_y, src_w;
+ u32 offset;
+ int ret;
+
+ ret = intel_plane_compute_gtt(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+
+ /* Undocumented hardware limit on i965/g4x/vlv/chv */
+ if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
+ return -EINVAL;
+
+ intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+ plane_state, 0);
+ else
+ offset = 0;
+
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->uapi.src,
+ src_x << 16, src_y << 16);
+
+ /* HSW/BDW do this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ if (rotation & DRM_MODE_ROTATE_180) {
+ src_x += src_w - 1;
+ src_y += src_h - 1;
+ } else if (rotation & DRM_MODE_REFLECT_X) {
+ src_x += src_w - 1;
+ }
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ drm_WARN_ON(&dev_priv->drm, src_x > 8191 || src_y > 4095);
+ } else if (INTEL_GEN(dev_priv) >= 4 &&
+ fb->modifier == I915_FORMAT_MOD_X_TILED) {
+ drm_WARN_ON(&dev_priv->drm, src_x > 4095 || src_y > 4095);
+ }
+
+ plane_state->color_plane[0].offset = offset;
+ plane_state->color_plane[0].x = src_x;
+ plane_state->color_plane[0].y = src_y;
+
+ return 0;
+}
+
+static int
+i9xx_plane_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ int ret;
+
+ ret = chv_plane_check_rotation(plane_state);
+ if (ret)
+ return ret;
+
+ ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ i9xx_plane_has_windowing(plane));
+ if (ret)
+ return ret;
+
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dspcntr = 0;
+
+ if (crtc_state->gamma_enable)
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
+
+ if (INTEL_GEN(dev_priv) < 5)
+ dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
+
+ return dspcntr;
+}
+
+static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ /*
+ * g4x bspec says 64bpp pixel rate can't exceed 80%
+ * of cdclk when the sprite plane is enabled on the
+ * same pipe. ilk/snb bspec says 64bpp pixel rate is
+ * never allowed to exceed 80% of cdclk. Let's just go
+ * with the ilk/snb limit always.
+ */
+ if (cpp == 8) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* two pixels per clock with double wide pipe */
+ if (crtc_state->double_wide)
+ den *= 2;
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
+static void i9xx_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 linear_offset;
+ int x = plane_state->color_plane[0].x;
+ int y = plane_state->color_plane[0].y;
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ int crtc_h = drm_rect_height(&plane_state->uapi.dst);
+ unsigned long irqflags;
+ u32 dspaddr_offset;
+ u32 dspcntr;
+
+ dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
+
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ dspaddr_offset = plane_state->color_plane[0].offset;
+ else
+ dspaddr_offset = linear_offset;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
+ plane_state->color_plane[0].stride);
+
+ if (INTEL_GEN(dev_priv) < 4) {
+ /*
+ * PLANE_A doesn't actually have a full window
+ * generator but let's assume we still need to
+ * program whatever is there.
+ */
+ intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
+ } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
+ intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
+ (y << 16) | x);
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
+ linear_offset);
+ intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
+ (y << 16) | x);
+ }
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+ if (INTEL_GEN(dev_priv) >= 4)
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ else
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i9xx_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ unsigned long irqflags;
+ u32 dspcntr;
+
+ /*
+ * DSPCNTR pipe gamma enable on g4x+ and pipe csc
+ * enable on ilk+ affect the pipe bottom color as
+ * well, so we must configure them even if the plane
+ * is disabled.
+ *
+ * On pre-g4x there is no way to gamma correct the
+ * pipe bottom color but we'll keep on doing this
+ * anyway so that the crtc state readout works correctly.
+ */
+ dspcntr = i9xx_plane_ctl_crtc(crtc_state);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+ if (INTEL_GEN(dev_priv) >= 4)
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
+ else
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+g4x_primary_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
+ u32 dspaddr_offset = plane_state->color_plane[0].offset;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ unsigned long irqflags;
+
+ if (async_flip)
+ dspcntr |= DISPPLANE_ASYNC_FLIP;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+vlv_primary_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 dspaddr_offset = plane_state->color_plane[0].offset;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ intel_de_write_fw(dev_priv, DSPADDR_VLV(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+bdw_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+bdw_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ivb_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ivb_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ilk_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ilk_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+vlv_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+vlv_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ intel_wakeref_t wakeref;
+ bool ret;
+ u32 val;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-4 which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
+
+ ret = val & DISPLAY_PLANE_ENABLE;
+
+ if (INTEL_GEN(dev_priv) >= 5)
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+ DISPPLANE_SEL_PIPE_SHIFT;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static unsigned int
+hsw_primary_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
+ return min(8192 * cpp, 32 * 1024);
+}
+
+static unsigned int
+ilk_primary_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return min(4096 * cpp, 32 * 1024);
+ else
+ return 32 * 1024;
+}
+
+unsigned int
+i965_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return min(4096 * cpp, 16 * 1024);
+ else
+ return 32 * 1024;
+}
+
+static unsigned int
+i9xx_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+ if (INTEL_GEN(dev_priv) >= 3) {
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return 8*1024;
+ else
+ return 16*1024;
+ } else {
+ if (plane->i9xx_plane == PLANE_C)
+ return 4*1024;
+ else
+ return 8*1024;
+ }
+}
+
+static const struct drm_plane_funcs i965_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = i965_plane_format_mod_supported,
+};
+
+static const struct drm_plane_funcs i8xx_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = i8xx_plane_format_mod_supported,
+};
+
+struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_plane *plane;
+ const struct drm_plane_funcs *plane_funcs;
+ unsigned int supported_rotations;
+ const u32 *formats;
+ int num_formats;
+ int ret, zpos;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ return skl_universal_plane_create(dev_priv, pipe,
+ PLANE_PRIMARY);
+
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
+
+ plane->pipe = pipe;
+ /*
+ * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
+ * port are hooked to pipe B. Hence we want plane A feeding pipe B.
+ */
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
+ INTEL_NUM_PIPES(dev_priv) == 2)
+ plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
+ else
+ plane->i9xx_plane = (enum i9xx_plane_id) pipe;
+ plane->id = PLANE_PRIMARY;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
+
+ plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
+ if (plane->has_fbc) {
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
+ }
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ formats = vlv_primary_formats;
+ num_formats = ARRAY_SIZE(vlv_primary_formats);
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ /*
+ * WaFP16GammaEnabling:ivb
+ * "Workaround : When using the 64-bit format, the plane
+ * output on each color channel has one quarter amplitude.
+ * It can be brought up to full amplitude by using pipe
+ * gamma correction or pipe color space conversion to
+ * multiply the plane output by four."
+ *
+ * There is no dedicated plane gamma for the primary plane,
+ * and using the pipe gamma/csc could conflict with other
+ * planes, so we choose not to expose fp16 on IVB primary
+ * planes. HSW primary planes no longer have this problem.
+ */
+ if (IS_IVYBRIDGE(dev_priv)) {
+ formats = ivb_primary_formats;
+ num_formats = ARRAY_SIZE(ivb_primary_formats);
+ } else {
+ formats = i965_primary_formats;
+ num_formats = ARRAY_SIZE(i965_primary_formats);
+ }
+ } else {
+ formats = i8xx_primary_formats;
+ num_formats = ARRAY_SIZE(i8xx_primary_formats);
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ plane_funcs = &i965_plane_funcs;
+ else
+ plane_funcs = &i8xx_plane_funcs;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ plane->min_cdclk = vlv_plane_min_cdclk;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ else if (IS_IVYBRIDGE(dev_priv))
+ plane->min_cdclk = ivb_plane_min_cdclk;
+ else
+ plane->min_cdclk = i9xx_plane_min_cdclk;
+
+ if (HAS_GMCH(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 4)
+ plane->max_stride = i965_plane_max_stride;
+ else
+ plane->max_stride = i9xx_plane_max_stride;
+ } else {
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->max_stride = hsw_primary_max_stride;
+ else
+ plane->max_stride = ilk_primary_max_stride;
+ }
+
+ plane->update_plane = i9xx_update_plane;
+ plane->disable_plane = i9xx_disable_plane;
+ plane->get_hw_state = i9xx_plane_get_hw_state;
+ plane->check_plane = i9xx_plane_check;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ plane->async_flip = vlv_primary_async_flip;
+ plane->enable_flip_done = vlv_primary_enable_flip_done;
+ plane->disable_flip_done = vlv_primary_disable_flip_done;
+ } else if (IS_BROADWELL(dev_priv)) {
+ plane->need_async_flip_disable_wa = true;
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = bdw_primary_enable_flip_done;
+ plane->disable_flip_done = bdw_primary_disable_flip_done;
+ } else if (INTEL_GEN(dev_priv) >= 7) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ivb_primary_enable_flip_done;
+ plane->disable_flip_done = ivb_primary_disable_flip_done;
+ } else if (INTEL_GEN(dev_priv) >= 5) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ilk_primary_enable_flip_done;
+ plane->disable_flip_done = ilk_primary_disable_flip_done;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ 0, plane_funcs,
+ formats, num_formats,
+ i9xx_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY,
+ "primary %c", pipe_name(pipe));
+ else
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ 0, plane_funcs,
+ formats, num_formats,
+ i9xx_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY,
+ "plane %c",
+ plane_name(plane->i9xx_plane));
+ if (ret)
+ goto fail;
+
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
+ } else {
+ supported_rotations = DRM_MODE_ROTATE_0;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+
+ zpos = 0;
+ drm_plane_create_zpos_immutable_property(&plane->base, zpos);
+
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+
+ return plane;
+
+fail:
+ intel_plane_free(plane);
+
+ return ERR_PTR(ret);
+}
+
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
new file mode 100644
index 000000000000..ca963c2a8457
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _I9XX_PLANE_H_
+#define _I9XX_PLANE_H_
+
+#include <linux/types.h>
+
+enum pipe;
+struct drm_i915_private;
+struct intel_plane;
+struct intel_plane_state;
+
+unsigned int i965_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation);
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+
+struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index a9439b415603..9d245a689323 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1535,6 +1535,9 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
vdsc_cfg->convert_rgb = true;
+ /* FIXME: initialize from VBT */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+
ret = intel_dsc_compute_params(encoder, crtc_state);
if (ret)
return ret;
@@ -1616,10 +1619,6 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
get_dsi_io_power_domains(i915,
enc_to_intel_dsi(encoder));
-
- if (crtc_state->dsc.compression_enable)
- intel_display_power_get(i915,
- intel_dsc_power_domain(crtc_state));
}
static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 7e9f84b00859..4683f98f7e54 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -312,10 +312,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
int ret;
intel_plane_set_invisible(new_crtc_state, new_plane_state);
+ new_crtc_state->enabled_planes &= ~BIT(plane->id);
if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
return 0;
+ new_crtc_state->enabled_planes |= BIT(plane->id);
+
ret = plane->check_plane(new_crtc_state, new_plane_state);
if (ret)
return ret;
@@ -449,7 +452,7 @@ void intel_update_plane(struct intel_plane *plane,
trace_intel_update_plane(&plane->base, crtc);
if (crtc_state->uapi.async_flip && plane->async_flip)
- plane->async_flip(plane, crtc_state, plane_state);
+ plane->async_flip(plane, crtc_state, plane_state, true);
else
plane->update_plane(plane, crtc_state, plane_state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 4cc949b228f2..987cf509337f 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1623,6 +1623,13 @@ static const u8 icp_ddc_pin_map[] = {
[TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP,
};
+static const u8 rkl_pch_tgp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [RKL_DDC_BUS_DDI_D] = GMBUS_PIN_9_TC1_ICP,
+ [RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP,
+};
+
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
const u8 *ddc_pin_map;
@@ -1630,6 +1637,9 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) {
return vbt_pin;
+ } else if (IS_ROCKETLAKE(dev_priv) && INTEL_PCH_TYPE(dev_priv) == PCH_TGP) {
+ ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
@@ -2555,16 +2565,11 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
crtc_state->dsc.slice_count);
/*
- * FIXME: Use VBT rc_buffer_block_size and rc_buffer_size for the
- * implementation specific physical rate buffer size. Currently we use
- * the required rate buffer model size calculated in
- * drm_dsc_compute_rc_parameters() according to VESA DSC Annex E.
- *
* The VBT rc_buffer_block_size and rc_buffer_size definitions
- * correspond to DP 1.4 DPCD offsets 0x62 and 0x63. The DP DSC
- * implementation should also use the DPCD (or perhaps VBT for eDP)
- * provided value for the buffer size.
+ * correspond to DP 1.4 DPCD offsets 0x62 and 0x63.
*/
+ vdsc_cfg->rc_model_size = drm_dsc_dp_rc_buffer_size(dsc->rc_buffer_block_size,
+ dsc->rc_buffer_size);
/* FIXME: DSI spec says bpc + 1 for this one */
vdsc_cfg->line_buf_depth = VBT_DSC_LINE_BUFFER_DEPTH(dsc->line_buffer_depth);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index bd060404d249..4b5a30ac84bc 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -20,76 +20,9 @@ struct intel_qgv_point {
struct intel_qgv_info {
struct intel_qgv_point points[I915_NUM_QGV_POINTS];
u8 num_points;
- u8 num_channels;
u8 t_bl;
- enum intel_dram_type dram_type;
};
-static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
- struct intel_qgv_info *qi)
-{
- u32 val = 0;
- int ret;
-
- ret = sandybridge_pcode_read(dev_priv,
- ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
- &val, NULL);
- if (ret)
- return ret;
-
- if (IS_GEN(dev_priv, 12)) {
- switch (val & 0xf) {
- case 0:
- qi->dram_type = INTEL_DRAM_DDR4;
- break;
- case 3:
- qi->dram_type = INTEL_DRAM_LPDDR4;
- break;
- case 4:
- qi->dram_type = INTEL_DRAM_DDR3;
- break;
- case 5:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- default:
- MISSING_CASE(val & 0xf);
- break;
- }
- } else if (IS_GEN(dev_priv, 11)) {
- switch (val & 0xf) {
- case 0:
- qi->dram_type = INTEL_DRAM_DDR4;
- break;
- case 1:
- qi->dram_type = INTEL_DRAM_DDR3;
- break;
- case 2:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- case 3:
- qi->dram_type = INTEL_DRAM_LPDDR4;
- break;
- default:
- MISSING_CASE(val & 0xf);
- break;
- }
- } else {
- MISSING_CASE(INTEL_GEN(dev_priv));
- qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
- }
-
- qi->num_channels = (val & 0xf0) >> 4;
- qi->num_points = (val & 0xf00) >> 8;
-
- if (IS_GEN(dev_priv, 12))
- qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
- else if (IS_GEN(dev_priv, 11))
- qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
-
- return 0;
-}
-
static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
struct intel_qgv_point *sp,
int point)
@@ -139,11 +72,15 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_info *qi)
{
+ const struct dram_info *dram_info = &dev_priv->dram_info;
int i, ret;
- ret = icl_pcode_read_mem_global_info(dev_priv, qi);
- if (ret)
- return ret;
+ qi->num_points = dram_info->num_qgv_points;
+
+ if (IS_GEN(dev_priv, 12))
+ qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 16;
+ else if (IS_GEN(dev_priv, 11))
+ qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
if (drm_WARN_ON(&dev_priv->drm,
qi->num_points > ARRAY_SIZE(qi->points)))
@@ -209,7 +146,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
{
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels;
+ int num_channels = dev_priv->dram_info.num_channels;
int deinterleave;
int ipqdepth, ipqdepthpch;
int dclk_max;
@@ -222,7 +159,6 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
- num_channels = qi.num_channels;
deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
dclk_max = icl_sagv_max_dclk(&qi);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index c449d28d0560..2e878cc274b7 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2415,8 +2415,7 @@ static int intel_modeset_all_pipes(struct intel_atomic_state *state)
if (ret)
return ret;
- ret = drm_atomic_add_affected_planes(&state->base,
- &crtc->base);
+ ret = intel_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
@@ -2710,8 +2709,8 @@ static int dg1_rawclk(struct drm_i915_private *dev_priv)
* DG1 always uses a 38.4 MHz rawclk. The bspec tells us
* "Program Numerator=2, Denominator=4, Divider=37 decimal."
*/
- I915_WRITE(PCH_RAWCLK_FREQ,
- CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2));
+ intel_de_write(dev_priv, PCH_RAWCLK_FREQ,
+ CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2));
return 38400;
}
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 172d398081ee..ff7dcb7088bf 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -1485,6 +1485,7 @@ static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
static int ivb_color_check(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
bool limited_color_range = ilk_csc_limited_range(crtc_state);
int ret;
@@ -1492,6 +1493,13 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
+ crtc_state->hw.ctm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "YCBCR and CTM together are not possible\n");
+ return -EINVAL;
+ }
+
crtc_state->gamma_enable =
(crtc_state->hw.gamma_lut ||
crtc_state->hw.degamma_lut) &&
@@ -1525,12 +1533,20 @@ static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
static int glk_color_check(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int ret;
ret = check_luts(crtc_state);
if (ret)
return ret;
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
+ crtc_state->hw.ctm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "YCBCR and CTM together are not possible\n");
+ return -EINVAL;
+ }
+
crtc_state->gamma_enable =
crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes;
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index d5ad61e4083e..996ae0608a62 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -427,10 +427,22 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
u32 val;
if (phy == PHY_A &&
- !icl_combo_phy_verify_state(dev_priv, phy))
- drm_warn(&dev_priv->drm,
- "Combo PHY %c HW state changed unexpectedly\n",
- phy_name(phy));
+ !icl_combo_phy_verify_state(dev_priv, phy)) {
+ if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) {
+ /*
+ * A known problem with old ifwi:
+ * https://gitlab.freedesktop.org/drm/intel/-/issues/2411
+ * Suppress the warning for CI. Remove ASAP!
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "Combo PHY %c HW state changed unexpectedly\n",
+ phy_name(phy));
+ } else {
+ drm_warn(&dev_priv->drm,
+ "Combo PHY %c HW state changed unexpectedly\n",
+ phy_name(phy));
+ }
+ }
if (!has_phy_misc(dev_priv, phy))
goto skip_phy_misc;
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 406e96785c76..d5ceb7bdc14b 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -279,24 +279,17 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector)
}
void
-intel_attach_colorspace_property(struct drm_connector *connector)
+intel_attach_hdmi_colorspace_property(struct drm_connector *connector)
{
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_HDMIA:
- case DRM_MODE_CONNECTOR_HDMIB:
- if (drm_mode_create_hdmi_colorspace_property(connector))
- return;
- break;
- case DRM_MODE_CONNECTOR_DisplayPort:
- case DRM_MODE_CONNECTOR_eDP:
- if (drm_mode_create_dp_colorspace_property(connector))
- return;
- break;
- default:
- MISSING_CASE(connector->connector_type);
- return;
- }
+ if (!drm_mode_create_hdmi_colorspace_property(connector))
+ drm_object_attach_property(&connector->base,
+ connector->colorspace_property, 0);
+}
- drm_object_attach_property(&connector->base,
- connector->colorspace_property, 0);
+void
+intel_attach_dp_colorspace_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_dp_colorspace_property(connector))
+ drm_object_attach_property(&connector->base,
+ connector->colorspace_property, 0);
}
diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h
index 93a7375c8196..661a37a3c6d8 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.h
+++ b/drivers/gpu/drm/i915/display/intel_connector.h
@@ -30,6 +30,7 @@ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
void intel_attach_force_audio_property(struct drm_connector *connector);
void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
void intel_attach_aspect_ratio_property(struct drm_connector *connector);
-void intel_attach_colorspace_property(struct drm_connector *connector);
+void intel_attach_hdmi_colorspace_property(struct drm_connector *connector);
+void intel_attach_dp_colorspace_property(struct drm_connector *connector);
#endif /* __INTEL_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
new file mode 100644
index 000000000000..57b0a3ebe908
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_plane_helper.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_color.h"
+#include "intel_crtc.h"
+#include "intel_cursor.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_types.h"
+#include "intel_pipe_crc.h"
+#include "intel_sprite.h"
+#include "i9xx_plane.h"
+
+static void assert_vblank_disabled(struct drm_crtc *crtc)
+{
+ if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
+ drm_crtc_vblank_put(crtc);
+}
+
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
+
+ if (!vblank->max_vblank_count)
+ return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
+
+ return crtc->base.funcs->get_vblank_counter(&crtc->base);
+}
+
+u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 mode_flags = crtc->mode_flags;
+
+ /*
+ * From Gen 11, In case of dsi cmd mode, frame counter wouldnt
+ * have updated at the beginning of TE, if we want to use
+ * the hw counter, then we would find it updated in only
+ * the next TE, hence switching to sw counter.
+ */
+ if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
+ return 0;
+
+ /*
+ * On i965gm the hardware frame counter reads
+ * zero when the TV encoder is enabled :(
+ */
+ if (IS_I965GM(dev_priv) &&
+ (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
+ return 0;
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ return 0xffffffff; /* full 32 bit counter */
+ else if (INTEL_GEN(dev_priv) >= 3)
+ return 0xffffff; /* only 24 bits of frame count */
+ else
+ return 0; /* Gen2 doesn't have a hardware frame counter */
+}
+
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ assert_vblank_disabled(&crtc->base);
+ drm_crtc_set_max_vblank_count(&crtc->base,
+ intel_crtc_max_vblank_count(crtc_state));
+ drm_crtc_vblank_on(&crtc->base);
+}
+
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ drm_crtc_vblank_off(&crtc->base);
+ assert_vblank_disabled(&crtc->base);
+}
+
+struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
+
+ if (crtc_state)
+ intel_crtc_state_reset(crtc_state, crtc);
+
+ return crtc_state;
+}
+
+void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+ struct intel_crtc *crtc)
+{
+ memset(crtc_state, 0, sizeof(*crtc_state));
+
+ __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
+
+ crtc_state->cpu_transcoder = INVALID_TRANSCODER;
+ crtc_state->master_transcoder = INVALID_TRANSCODER;
+ crtc_state->hsw_workaround_pipe = INVALID_PIPE;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
+ crtc_state->scaler_state.scaler_id = -1;
+ crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
+}
+
+static struct intel_crtc *intel_crtc_alloc(void)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc_state = intel_crtc_state_alloc(crtc);
+ if (!crtc_state) {
+ kfree(crtc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ crtc->base.state = &crtc_state->uapi;
+ crtc->config = crtc_state;
+
+ return crtc;
+}
+
+static void intel_crtc_free(struct intel_crtc *crtc)
+{
+ intel_crtc_destroy_state(&crtc->base, crtc->base.state);
+ kfree(crtc);
+}
+
+static void intel_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(intel_crtc);
+}
+
+static int intel_crtc_late_register(struct drm_crtc *crtc)
+{
+ intel_crtc_debugfs_add(crtc);
+ return 0;
+}
+
+#define INTEL_CRTC_FUNCS \
+ .set_config = drm_atomic_helper_set_config, \
+ .destroy = intel_crtc_destroy, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = intel_crtc_duplicate_state, \
+ .atomic_destroy_state = intel_crtc_destroy_state, \
+ .set_crc_source = intel_crtc_set_crc_source, \
+ .verify_crc_source = intel_crtc_verify_crc_source, \
+ .get_crc_sources = intel_crtc_get_crc_sources, \
+ .late_register = intel_crtc_late_register
+
+static const struct drm_crtc_funcs bdw_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = bdw_enable_vblank,
+ .disable_vblank = bdw_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs ilk_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = ilk_enable_vblank,
+ .disable_vblank = ilk_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs g4x_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i965_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i915gm_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i915gm_enable_vblank,
+ .disable_vblank = i915gm_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i915_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i8xx_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ /* no hw vblank counter */
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_plane *primary, *cursor;
+ const struct drm_crtc_funcs *funcs;
+ struct intel_crtc *crtc;
+ int sprite, ret;
+
+ crtc = intel_crtc_alloc();
+ if (IS_ERR(crtc))
+ return PTR_ERR(crtc);
+
+ crtc->pipe = pipe;
+ crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
+
+ primary = intel_primary_plane_create(dev_priv, pipe);
+ if (IS_ERR(primary)) {
+ ret = PTR_ERR(primary);
+ goto fail;
+ }
+ crtc->plane_ids_mask |= BIT(primary->id);
+
+ for_each_sprite(dev_priv, pipe, sprite) {
+ struct intel_plane *plane;
+
+ plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ crtc->plane_ids_mask |= BIT(plane->id);
+ }
+
+ cursor = intel_cursor_plane_create(dev_priv, pipe);
+ if (IS_ERR(cursor)) {
+ ret = PTR_ERR(cursor);
+ goto fail;
+ }
+ crtc->plane_ids_mask |= BIT(cursor->id);
+
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
+ funcs = &g4x_crtc_funcs;
+ else if (IS_GEN(dev_priv, 4))
+ funcs = &i965_crtc_funcs;
+ else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
+ funcs = &i915gm_crtc_funcs;
+ else if (IS_GEN(dev_priv, 3))
+ funcs = &i915_crtc_funcs;
+ else
+ funcs = &i8xx_crtc_funcs;
+ } else {
+ if (INTEL_GEN(dev_priv) >= 8)
+ funcs = &bdw_crtc_funcs;
+ else
+ funcs = &ilk_crtc_funcs;
+ }
+
+ ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
+ &primary->base, &cursor->base,
+ funcs, "pipe %c", pipe_name(pipe));
+ if (ret)
+ goto fail;
+
+ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
+ dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
+ dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
+
+ if (INTEL_GEN(dev_priv) < 9) {
+ enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
+
+ BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+ dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ drm_crtc_create_scaling_filter_property(&crtc->base,
+ BIT(DRM_SCALING_FILTER_DEFAULT) |
+ BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
+
+ intel_color_init(crtc);
+
+ intel_crtc_crc_init(crtc);
+
+ drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
+
+ return 0;
+
+fail:
+ intel_crtc_free(crtc);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
new file mode 100644
index 000000000000..08112d557411
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_CRTC_H_
+#define _INTEL_CRTC_H_
+
+#include <linux/types.h>
+
+enum pipe;
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+
+u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
+int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe);
+struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
+void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+ struct intel_crtc *crtc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
new file mode 100644
index 000000000000..21fe4d2753e9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_cursor.h"
+#include "intel_display_types.h"
+#include "intel_display.h"
+
+#include "intel_frontbuffer.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+
+/* Cursor formats */
+static const u32 intel_cursor_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+static const u64 cursor_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ u32 base;
+
+ if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
+ base = sg_dma_address(obj->mm.pages->sgl);
+ else
+ base = intel_plane_ggtt_offset(plane_state);
+
+ return base + plane_state->color_plane[0].offset;
+}
+
+static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
+{
+ int x = plane_state->uapi.dst.x1;
+ int y = plane_state->uapi.dst.y1;
+ u32 pos = 0;
+
+ if (x < 0) {
+ pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+ x = -x;
+ }
+ pos |= x << CURSOR_X_SHIFT;
+
+ if (y < 0) {
+ pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+ y = -y;
+ }
+ pos |= y << CURSOR_Y_SHIFT;
+
+ return pos;
+}
+
+static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ const struct drm_mode_config *config =
+ &plane_state->uapi.plane->dev->mode_config;
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
+
+ return width > 0 && width <= config->cursor_width &&
+ height > 0 && height <= config->cursor_height;
+}
+
+static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_x, src_y;
+ u32 offset;
+ int ret;
+
+ ret = intel_plane_compute_gtt(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+
+ intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+ offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+ plane_state, 0);
+
+ if (src_x != 0 || src_y != 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Arbitrary cursor panning not supported\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->uapi.src,
+ src_x << 16, src_y << 16);
+
+ /* ILK+ do this automagically in hardware */
+ if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ offset += (src_h * src_w - 1) * fb->format->cpp[0];
+ }
+
+ plane_state->color_plane[0].offset = offset;
+ plane_state->color_plane[0].x = src_x;
+ plane_state->color_plane[0].y = src_y;
+
+ return 0;
+}
+
+static int intel_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_rect src = plane_state->uapi.src;
+ const struct drm_rect dst = plane_state->uapi.dst;
+ int ret;
+
+ if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
+ return -EINVAL;
+ }
+
+ ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true);
+ if (ret)
+ return ret;
+
+ /* Use the unclipped src/dst rectangles, which we program to hw */
+ plane_state->uapi.src = src;
+ plane_state->uapi.dst = dst;
+
+ ret = intel_cursor_check_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static unsigned int
+i845_cursor_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ return 2048;
+}
+
+static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ u32 cntl = 0;
+
+ if (crtc_state->gamma_enable)
+ cntl |= CURSOR_GAMMA_ENABLE;
+
+ return cntl;
+}
+
+static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ return CURSOR_ENABLE |
+ CURSOR_FORMAT_ARGB |
+ CURSOR_STRIDE(plane_state->color_plane[0].stride);
+}
+
+static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ int width = drm_rect_width(&plane_state->uapi.dst);
+
+ /*
+ * 845g/865g are only limited by the width of their cursors;
+ * the height is arbitrary up to the precision of the register.
+ */
+ return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
+}
+
+static int i845_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ int ret;
+
+ ret = intel_check_cursor(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ /* if we want to turn off the cursor ignore width and height */
+ if (!fb)
+ return 0;
+
+ /* Check for which cursor types we support */
+ if (!i845_cursor_size_ok(plane_state)) {
+ drm_dbg_kms(&i915->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
+ return -EINVAL;
+ }
+
+ drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
+
+ switch (fb->pitches[0]) {
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ break;
+ default:
+ drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
+ fb->pitches[0]);
+ return -EINVAL;
+ }
+
+ plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+static void i845_update_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 cntl = 0, base = 0, pos = 0, size = 0;
+ unsigned long irqflags;
+
+ if (plane_state && plane_state->uapi.visible) {
+ unsigned int width = drm_rect_width(&plane_state->uapi.dst);
+ unsigned int height = drm_rect_height(&plane_state->uapi.dst);
+
+ cntl = plane_state->ctl |
+ i845_cursor_ctl_crtc(crtc_state);
+
+ size = (height << 12) | width;
+
+ base = intel_cursor_base(plane_state);
+ pos = intel_cursor_position(plane_state);
+ }
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /* On these chipsets we can only modify the base/size/stride
+ * whilst the cursor is disabled.
+ */
+ if (plane->cursor.base != base ||
+ plane->cursor.size != size ||
+ plane->cursor.cntl != cntl) {
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
+ intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
+ intel_de_write_fw(dev_priv, CURSIZE, size);
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
+
+ plane->cursor.base = base;
+ plane->cursor.size = size;
+ plane->cursor.cntl = cntl;
+ } else {
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i845_disable_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ i845_update_cursor(plane, crtc_state, NULL);
+}
+
+static bool i845_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(PIPE_A);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+
+ *pipe = PIPE_A;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static unsigned int
+i9xx_cursor_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ return plane->base.dev->mode_config.cursor_width * 4;
+}
+
+static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 cntl = 0;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ return cntl;
+
+ if (crtc_state->gamma_enable)
+ cntl = MCURSOR_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ cntl |= MCURSOR_PIPE_CSC_ENABLE;
+
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+ cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
+
+ return cntl;
+}
+
+static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ u32 cntl = 0;
+
+ if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
+
+ switch (drm_rect_width(&plane_state->uapi.dst)) {
+ case 64:
+ cntl |= MCURSOR_MODE_64_ARGB_AX;
+ break;
+ case 128:
+ cntl |= MCURSOR_MODE_128_ARGB_AX;
+ break;
+ case 256:
+ cntl |= MCURSOR_MODE_256_ARGB_AX;
+ break;
+ default:
+ MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
+ return 0;
+ }
+
+ if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
+ cntl |= MCURSOR_ROTATE_180;
+
+ return cntl;
+}
+
+static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
+
+ if (!intel_cursor_size_ok(plane_state))
+ return false;
+
+ /* Cursor width is limited to a few power-of-two sizes */
+ switch (width) {
+ case 256:
+ case 128:
+ case 64:
+ break;
+ default:
+ return false;
+ }
+
+ /*
+ * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
+ * height from 8 lines up to the cursor width, when the
+ * cursor is not rotated. Everything else requires square
+ * cursors.
+ */
+ if (HAS_CUR_FBC(dev_priv) &&
+ plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
+ if (height < 8 || height > width)
+ return false;
+ } else {
+ if (height != width)
+ return false;
+ }
+
+ return true;
+}
+
+static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum pipe pipe = plane->pipe;
+ int ret;
+
+ ret = intel_check_cursor(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ /* if we want to turn off the cursor ignore width and height */
+ if (!fb)
+ return 0;
+
+ /* Check for which cursor types we support */
+ if (!i9xx_cursor_size_ok(plane_state)) {
+ drm_dbg(&dev_priv->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
+ return -EINVAL;
+ }
+
+ drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
+
+ if (fb->pitches[0] !=
+ drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid cursor stride (%u) (cursor width %d)\n",
+ fb->pitches[0],
+ drm_rect_width(&plane_state->uapi.dst));
+ return -EINVAL;
+ }
+
+ /*
+ * There's something wrong with the cursor on CHV pipe C.
+ * If it straddles the left edge of the screen then
+ * moving it away from the edge or disabling it often
+ * results in a pipe underrun, and often that can lead to
+ * a dead pipe (constant underrun reported, and it scans
+ * out just a solid color). To recover from that, the
+ * display power well must be turned off and on again.
+ * Refuse to put the cursor into that compromised position.
+ */
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
+ plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "CHV cursor C not allowed to straddle the left screen edge\n");
+ return -EINVAL;
+ }
+
+ plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+static void i9xx_update_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
+ unsigned long irqflags;
+
+ if (plane_state && plane_state->uapi.visible) {
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
+
+ cntl = plane_state->ctl |
+ i9xx_cursor_ctl_crtc(crtc_state);
+
+ if (width != height)
+ fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
+
+ base = intel_cursor_base(plane_state);
+ pos = intel_cursor_position(plane_state);
+ }
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /*
+ * On some platforms writing CURCNTR first will also
+ * cause CURPOS to be armed by the CURBASE write.
+ * Without the CURCNTR write the CURPOS write would
+ * arm itself. Thus we always update CURCNTR before
+ * CURPOS.
+ *
+ * On other platforms CURPOS always requires the
+ CURBASE write to arm the update. Additionally
+ a write to any of the cursor registers will cancel
+ * an already armed cursor update. Thus leaving out
+ * the CURBASE write after CURPOS could lead to a
+ * cursor that doesn't appear to move, or even change
+ * shape. Thus we always write CURBASE.
+ *
+ * The other registers are armed by the CURBASE write
+ * except when the plane is getting enabled at which time
+ * the CURCNTR write arms the update.
+ */
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_write_cursor_wm(plane, crtc_state);
+
+ if (!intel_crtc_needs_modeset(crtc_state))
+ intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
+
+ if (plane->cursor.base != base ||
+ plane->cursor.size != fbc_ctl ||
+ plane->cursor.cntl != cntl) {
+ if (HAS_CUR_FBC(dev_priv))
+ intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
+ fbc_ctl);
+ intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
+
+ plane->cursor.base = base;
+ plane->cursor.size = fbc_ctl;
+ plane->cursor.cntl = cntl;
+ } else {
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i9xx_disable_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ i9xx_update_cursor(plane, crtc_state, NULL);
+}
+
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ bool ret;
+ u32 val;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-3 which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
+
+ ret = val & MCURSOR_MODE;
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
+ MCURSOR_PIPE_SELECT_SHIFT;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ return modifier == DRM_FORMAT_MOD_LINEAR &&
+ format == DRM_FORMAT_ARGB8888;
+}
+
+static int
+intel_legacy_cursor_update(struct drm_plane *_plane,
+ struct drm_crtc *_crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_plane_state *new_plane_state;
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc_state *new_crtc_state;
+ int ret;
+
+ /*
+ * When crtc is inactive or there is a modeset pending,
+ * wait for it to complete in the slowpath
+ *
+ * FIXME bigjoiner fastpath would be good
+ */
+ if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state) ||
+ crtc_state->update_pipe || crtc_state->bigjoiner)
+ goto slow;
+
+ /*
+ * Don't do an async update if there is an outstanding commit modifying
+ * the plane. This prevents our async update's changes from getting
+ * overridden by a previous synchronous update's state.
+ */
+ if (old_plane_state->uapi.commit &&
+ !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
+ goto slow;
+
+ /*
+ * If any parameters change that may affect watermarks,
+ * take the slowpath. Only changing fb or position should be
+ * in the fastpath.
+ */
+ if (old_plane_state->uapi.crtc != &crtc->base ||
+ old_plane_state->uapi.src_w != src_w ||
+ old_plane_state->uapi.src_h != src_h ||
+ old_plane_state->uapi.crtc_w != crtc_w ||
+ old_plane_state->uapi.crtc_h != crtc_h ||
+ !old_plane_state->uapi.fb != !fb)
+ goto slow;
+
+ new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
+ if (!new_plane_state)
+ return -ENOMEM;
+
+ new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
+ if (!new_crtc_state) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
+
+ new_plane_state->uapi.src_x = src_x;
+ new_plane_state->uapi.src_y = src_y;
+ new_plane_state->uapi.src_w = src_w;
+ new_plane_state->uapi.src_h = src_h;
+ new_plane_state->uapi.crtc_x = crtc_x;
+ new_plane_state->uapi.crtc_y = crtc_y;
+ new_plane_state->uapi.crtc_w = crtc_w;
+ new_plane_state->uapi.crtc_h = crtc_h;
+
+ intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc);
+
+ ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
+ old_plane_state, new_plane_state);
+ if (ret)
+ goto out_free;
+
+ ret = intel_plane_pin_fb(new_plane_state);
+ if (ret)
+ goto out_free;
+
+ intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
+ ORIGIN_FLIP);
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+ to_intel_frontbuffer(new_plane_state->hw.fb),
+ plane->frontbuffer_bit);
+
+ /* Swap plane state */
+ plane->base.state = &new_plane_state->uapi;
+
+ /*
+ * We cannot swap crtc_state as it may be in use by an atomic commit or
+ * page flip that's running simultaneously. If we swap crtc_state and
+ * destroy the old state, we will cause a use-after-free there.
+ *
+ * Only update active_planes, which is needed for our internal
+ * bookkeeping. Either value will do the right thing when updating
+ * planes atomically. If the cursor was part of the atomic update then
+ * we would have taken the slowpath.
+ */
+ crtc_state->active_planes = new_crtc_state->active_planes;
+
+ if (new_plane_state->uapi.visible)
+ intel_update_plane(plane, crtc_state, new_plane_state);
+ else
+ intel_disable_plane(plane, crtc_state);
+
+ intel_plane_unpin_fb(old_plane_state);
+
+out_free:
+ if (new_crtc_state)
+ intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
+ if (ret)
+ intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
+ else
+ intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
+ return ret;
+
+slow:
+ return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h, ctx);
+}
+
+static const struct drm_plane_funcs intel_cursor_plane_funcs = {
+ .update_plane = intel_legacy_cursor_update,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = intel_cursor_format_mod_supported,
+};
+
+struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct intel_plane *cursor;
+ int ret, zpos;
+
+ cursor = intel_plane_alloc();
+ if (IS_ERR(cursor))
+ return cursor;
+
+ cursor->pipe = pipe;
+ cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
+ cursor->id = PLANE_CURSOR;
+ cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
+
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+ cursor->max_stride = i845_cursor_max_stride;
+ cursor->update_plane = i845_update_cursor;
+ cursor->disable_plane = i845_disable_cursor;
+ cursor->get_hw_state = i845_cursor_get_hw_state;
+ cursor->check_plane = i845_check_cursor;
+ } else {
+ cursor->max_stride = i9xx_cursor_max_stride;
+ cursor->update_plane = i9xx_update_cursor;
+ cursor->disable_plane = i9xx_disable_cursor;
+ cursor->get_hw_state = i9xx_cursor_get_hw_state;
+ cursor->check_plane = i9xx_check_cursor;
+ }
+
+ cursor->cursor.base = ~0;
+ cursor->cursor.cntl = ~0;
+
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
+ cursor->cursor.size = ~0;
+
+ ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
+ 0, &intel_cursor_plane_funcs,
+ intel_cursor_formats,
+ ARRAY_SIZE(intel_cursor_formats),
+ cursor_format_modifiers,
+ DRM_PLANE_TYPE_CURSOR,
+ "cursor %c", pipe_name(pipe));
+ if (ret)
+ goto fail;
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&cursor->base,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180);
+
+ zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
+ drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ drm_plane_enable_fb_damage_clips(&cursor->base);
+
+ drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
+
+ return cursor;
+
+fail:
+ intel_plane_free(cursor);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.h b/drivers/gpu/drm/i915/display/intel_cursor.h
new file mode 100644
index 000000000000..ce333bf4c2d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cursor.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_CURSOR_H_
+#define _INTEL_CURSOR_H_
+
+enum pipe;
+struct drm_i915_private;
+struct intel_plane;
+
+struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+#endif
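[Editor's aside, not part of the patch] The new intel_cursor.h header exposes a single constructor that reports failure via ERR_PTR(), as the intel_cursor.c hunk above shows. A minimal sketch of a caller under those assumptions; example_init_cursor() is hypothetical and the usual i915 display headers are assumed to be included:

#include <linux/err.h>          /* IS_ERR(), PTR_ERR() */
#include "intel_cursor.h"

/* Hypothetical caller, for illustration only; not part of this patch. */
static int example_init_cursor(struct drm_i915_private *i915, enum pipe pipe)
{
	struct intel_plane *cursor;

	cursor = intel_cursor_plane_create(i915, pipe);
	if (IS_ERR(cursor))
		return PTR_ERR(cursor); /* constructor returns ERR_PTR() on failure */

	return 0;
}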
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 92940a0c5ef8..fbd857f2bdb2 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -46,10 +46,12 @@
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
+#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
+#include "intel_vrr.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
@@ -611,6 +613,34 @@ static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[]
{ 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
};
+static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x60, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x58, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
struct icl_mg_phy_ddi_buf_trans {
u32 cri_txdeemph_override_11_6;
u32 cri_txdeemph_override_5_0;
@@ -766,6 +796,34 @@ static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_ho
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */
};
+static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x2F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7D, 0x2A, 0x00, 0x15 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6E, 0x3E, 0x00, 0x01 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x50, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
+ { 0xC, 0x61, 0x33, 0x00, 0x0C }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2E, 0x00, 0x11 }, /* 350 900 8.2 */
+ { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x5F, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x5F, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7E, 0x36, 0x00, 0x09 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
static bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table)
{
return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
@@ -1093,6 +1151,12 @@ icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
} else if (dev_priv->vbt.edp.low_vswing) {
*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
return icl_combo_phy_ddi_translations_edp_hbr2;
+ } else if (IS_DG1(dev_priv) && crtc_state->port_clock > 270000) {
+ *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_hbr2_hbr3);
+ return dg1_combo_phy_ddi_translations_dp_hbr2_hbr3;
+ } else if (IS_DG1(dev_priv)) {
+ *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_rbr_hbr);
+ return dg1_combo_phy_ddi_translations_dp_rbr_hbr;
}
return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
@@ -1259,7 +1323,10 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (crtc_state->port_clock > 270000) {
- if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr2_hbr3);
+ return rkl_combo_phy_ddi_translations_dp_hbr2_hbr3;
+ } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
*n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2);
return tgl_uy_combo_phy_ddi_translations_dp_hbr2;
} else {
@@ -1267,8 +1334,13 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
return tgl_combo_phy_ddi_translations_dp_hbr2;
}
} else {
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
- return tgl_combo_phy_ddi_translations_dp_hbr;
+ if (IS_ROCKETLAKE(dev_priv)) {
+ *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr);
+ return rkl_combo_phy_ddi_translations_dp_hbr;
+ } else {
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
+ return tgl_combo_phy_ddi_translations_dp_hbr;
+ }
}
}
@@ -2029,9 +2101,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
}
}
-int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
- enum transcoder cpu_transcoder,
- bool enable)
+int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
+ enum transcoder cpu_transcoder,
+ bool enable, u32 hdcp_mask)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2046,9 +2118,9 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (enable)
- tmp |= TRANS_DDI_HDCP_SIGNALLING;
+ tmp |= hdcp_mask;
else
- tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
+ tmp &= ~hdcp_mask;
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp);
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
@@ -2285,18 +2357,23 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
dig_port = enc_to_dig_port(encoder);
if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_get(dev_priv,
- dig_port->ddi_io_power_domain);
+ dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+ }
/*
* AUX power is only needed for (e)DP mode, and for HDMI mode on TC
* ports.
*/
if (intel_crtc_has_dp_encoder(crtc_state) ||
- intel_phy_is_tc(dev_priv, phy))
- intel_display_power_get(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
+ intel_phy_is_tc(dev_priv, phy)) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref);
+ dig_port->aux_wakeref =
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+ }
}
void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
@@ -2626,15 +2703,11 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
ddi_translations = ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
else
ddi_translations = icl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
- if (!ddi_translations)
- return;
- if (level >= n_entries) {
- drm_dbg_kms(&dev_priv->drm,
- "DDI translation not found for level %d. Using %d instead.",
- level, n_entries - 1);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ return;
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
- }
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2755,13 +2828,11 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
u32 val;
ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
- /* The table does not have values for level 3 and level 9. */
- if (level >= n_entries || level == 3 || level == 9) {
- drm_dbg_kms(&dev_priv->drm,
- "DDI translation not found for level %d. Using %d instead.",
- level, n_entries - 2);
- level = n_entries - 2;
- }
+
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ return;
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
+ level = n_entries - 1;
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
@@ -2893,7 +2964,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
- if (level >= n_entries)
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ return;
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
@@ -3480,6 +3553,22 @@ i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
return DP_TP_STATUS(encoder->port);
}
+static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_DOWNSPREAD_CTRL,
+ enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0) <= 0)
+ drm_dbg_kms(&i915->drm,
+ "Failed to set MSA_TIMING_PAR_IGNORE %s in the sink\n",
+ enable ? "enable" : "disable");
+}
+
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -3507,12 +3596,6 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
val |= DP_TP_CTL_FEC_ENABLE;
intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
-
- if (intel_de_wait_for_set(dev_priv,
- dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
- drm_err(&dev_priv->drm,
- "Timed out waiting for FEC Enable Status\n");
}
static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
@@ -3556,7 +3639,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
/* 2. Enable Panel Power if PPS is required */
- intel_edp_panel_on(intel_dp);
+ intel_pps_on(intel_dp);
/*
* 3. For non-TBT Type-C ports, set FIA lane count
@@ -3577,9 +3660,11 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_get(dev_priv,
- dig_port->ddi_io_power_domain);
+ dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+ }
/* 6. Program DP_MODE */
icl_program_mg_dp_mode(dig_port, crtc_state);
@@ -3643,6 +3728,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+ intel_dp_configure_protocol_converter(intel_dp, crtc_state);
intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
/*
* DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
@@ -3651,6 +3737,9 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
+
/*
* 7.i Follow DisplayPort specification training sequence (see notes for
* failure handling)
@@ -3693,14 +3782,16 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
crtc_state->port_clock,
crtc_state->lane_count);
- intel_edp_panel_on(intel_dp);
+ intel_pps_on(intel_dp);
intel_ddi_clk_select(encoder, crtc_state);
if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_get(dev_priv,
- dig_port->ddi_io_power_domain);
+ dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+ }
icl_program_mg_dp_mode(dig_port, crtc_state);
@@ -3725,7 +3816,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_init_dp_buf_reg(encoder, crtc_state);
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
- intel_dp_configure_protocol_converter(intel_dp);
+ intel_dp_configure_protocol_converter(intel_dp, crtc_state);
intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
true);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
@@ -3778,7 +3869,9 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_clk_select(encoder, crtc_state);
- intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port, crtc_state);
@@ -3931,13 +4024,14 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
if (INTEL_GEN(dev_priv) >= 12)
intel_ddi_disable_pipe_clock(old_crtc_state);
- intel_edp_panel_vdd_on(intel_dp);
- intel_edp_panel_off(intel_dp);
+ intel_pps_vdd_on(intel_dp);
+ intel_pps_off(intel_dp);
if (!intel_phy_is_tc(dev_priv, phy) ||
dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_put_unchecked(dev_priv,
- dig_port->ddi_io_power_domain);
+ intel_display_power_put(dev_priv,
+ dig_port->ddi_io_power_domain,
+ fetch_and_zero(&dig_port->ddi_io_wakeref));
intel_ddi_clk_disable(encoder);
}
@@ -3958,8 +4052,9 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
intel_disable_ddi_buf(encoder, old_crtc_state);
- intel_display_power_put_unchecked(dev_priv,
- dig_port->ddi_io_power_domain);
+ intel_display_power_put(dev_priv,
+ dig_port->ddi_io_power_domain,
+ fetch_and_zero(&dig_port->ddi_io_wakeref));
intel_ddi_clk_disable(encoder);
@@ -3981,6 +4076,8 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
intel_disable_pipe(old_crtc_state);
+ intel_vrr_disable(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
intel_dsc_disable(old_crtc_state);
@@ -4032,8 +4129,9 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
icl_unmap_plls_to_ports(encoder);
if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port)
- intel_display_power_put_unchecked(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
+ intel_display_power_put(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port),
+ fetch_and_zero(&dig_port->aux_wakeref));
if (is_tc_port)
intel_tc_port_put_link(dig_port);
@@ -4118,6 +4216,7 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum port port = encoder->port;
if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
@@ -4125,7 +4224,10 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
intel_edp_backlight_on(crtc_state, conn_state);
intel_psr_enable(intel_dp, crtc_state, conn_state);
- intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
+
+ if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink)
+ intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
+
intel_edp_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
@@ -4227,6 +4329,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
if (!crtc_state->bigjoiner_slave)
intel_ddi_enable_transcoder_func(encoder, crtc_state);
+ intel_vrr_enable(encoder, crtc_state);
+
intel_enable_pipe(crtc_state);
intel_crtc_vblank_on(crtc_state);
@@ -4240,7 +4344,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED)
intel_hdcp_enable(to_intel_connector(conn_state->connector),
- crtc_state->cpu_transcoder,
+ crtc_state,
(u8)conn_state->hdcp_content_type);
}
@@ -4263,6 +4367,9 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
/* Disable the decompression in DP Sink */
intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
false);
+ /* Disable Ignore_MSA bit in DP Sink */
+ intel_dp_sink_set_msa_timing_par_ignore_state(intel_dp, old_crtc_state,
+ false);
}
static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
@@ -4368,9 +4475,12 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
if (is_tc_port)
intel_tc_port_get_link(dig_port, crtc_state->lane_count);
- if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
- intel_display_power_get(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
+ if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref);
+ dig_port->aux_wakeref =
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+ }
if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT)
/*
@@ -4583,6 +4693,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 temp, flags = 0;
temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -4657,9 +4768,12 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
pipe_config->fec_enable);
}
- pipe_config->infoframes.enable |=
- intel_hdmi_infoframes_enabled(encoder, pipe_config);
-
+ if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink)
+ pipe_config->infoframes.enable |=
+ intel_lspcon_infoframes_enabled(encoder, pipe_config);
+ else
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
break;
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
@@ -4946,6 +5060,8 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
intel_dp_encoder_flush_work(encoder);
drm_encoder_cleanup(encoder);
+ if (dig_port)
+ kfree(dig_port->hdcp_port_data.streams);
kfree(dig_port);
}
@@ -5099,12 +5215,20 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &dig_port->dp;
enum phy phy = intel_port_to_phy(i915, encoder->port);
bool is_tc = intel_phy_is_tc(i915, phy);
struct drm_modeset_acquire_ctx ctx;
enum intel_hotplug_state state;
int ret;
+ if (intel_dp->compliance.test_active &&
+ intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
+ intel_dp_phy_test(encoder);
+ /* just do the PHY test and nothing else */
+ return INTEL_HOTPLUG_UNCHANGED;
+ }
+
state = intel_encoder_hotplug(encoder, connector);
drm_modeset_acquire_init(&ctx, 0);
@@ -5262,8 +5386,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
static bool hti_uses_phy(struct drm_i915_private *i915, enum phy phy)
{
return i915->hti_state & HDPORT_ENABLED &&
- (i915->hti_state & HDPORT_PHY_USED_DP(phy) ||
- i915->hti_state & HDPORT_PHY_USED_HDMI(phy));
+ i915->hti_state & HDPORT_DDI_USED(phy);
}
static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index dcc711cfe4fe..a4dd815c0000 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -50,9 +50,9 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
u32 ddi_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
-int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
- enum transcoder cpu_transcoder,
- bool enable);
+int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
+ enum transcoder cpu_transcoder,
+ bool enable, u32 hdcp_mask);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
#endif /* __INTEL_DDI_H__ */
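[Editor's aside, not part of the patch] intel_ddi_toggle_hdcp_signalling() is generalised above into intel_ddi_toggle_hdcp_bits(), which takes the mask to set or clear. A hedged sketch of how a caller could keep the old behaviour, assuming the TRANS_DDI_HDCP_SIGNALLING bit used by the previous implementation in the intel_ddi.c hunk; the wrapper name is hypothetical:

/* Hypothetical compatibility wrapper, illustration only; not part of this patch. */
static int example_toggle_hdcp_signalling(struct intel_encoder *encoder,
					  enum transcoder cpu_transcoder,
					  bool enable)
{
	/* The old helper toggled exactly this one bit. */
	return intel_ddi_toggle_hdcp_bits(encoder, cpu_transcoder,
					  enable, TRANS_DDI_HDCP_SIGNALLING);
}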
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 53a00cf3fa32..9843a0f202a0 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -45,8 +45,10 @@
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
+#include "display/intel_display_debugfs.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
+#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
@@ -56,6 +58,9 @@
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
+#include "display/intel_vrr.h"
+
+#include "gem/i915_gem_object.h"
#include "gt/intel_rps.h"
@@ -67,10 +72,12 @@
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
+#include "intel_crtc.h"
#include "intel_csr.h"
#include "intel_display_types.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
+#include "intel_fdi.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
@@ -79,72 +86,14 @@
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
+#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
-
-/* Primary plane formats for gen <= 3 */
-static const u32 i8xx_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
-};
-
-/* Primary plane formats for ivb (no fp16 due to hw issue) */
-static const u32 ivb_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
-};
-
-/* Primary plane formats for gen >= 4, except ivb */
-static const u32 i965_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_XBGR16161616F,
-};
-
-/* Primary plane formats for vlv/chv */
-static const u32 vlv_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_XBGR16161616F,
-};
-
-static const u64 i9xx_format_modifiers[] = {
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-/* Cursor formats */
-static const u32 intel_cursor_formats[] = {
- DRM_FORMAT_ARGB8888,
-};
-
-static const u64 cursor_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
+#include "i9xx_plane.h"
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
@@ -171,18 +120,6 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
-static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
-
-struct intel_limit {
- struct {
- int min, max;
- } dot, vco, n, m, m1, m2, p, p1;
-
- struct {
- int dot_limit;
- int p2_slow, p2_fast;
- } p2;
-};
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
@@ -241,281 +178,6 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
dev_priv->czclk_freq);
}
-/* units of 100MHz */
-static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *pipe_config)
-{
- if (HAS_DDI(dev_priv))
- return pipe_config->port_clock; /* SPLL */
- else
- return dev_priv->fdi_pll_freq;
-}
-
-static const struct intel_limit intel_limits_i8xx_dac = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 908000, .max = 1512000 },
- .n = { .min = 2, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 2, .max = 33 },
- .p2 = { .dot_limit = 165000,
- .p2_slow = 4, .p2_fast = 2 },
-};
-
-static const struct intel_limit intel_limits_i8xx_dvo = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 908000, .max = 1512000 },
- .n = { .min = 2, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 2, .max = 33 },
- .p2 = { .dot_limit = 165000,
- .p2_slow = 4, .p2_fast = 4 },
-};
-
-static const struct intel_limit intel_limits_i8xx_lvds = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 908000, .max = 1512000 },
- .n = { .min = 2, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 1, .max = 6 },
- .p2 = { .dot_limit = 165000,
- .p2_slow = 14, .p2_fast = 7 },
-};
-
-static const struct intel_limit intel_limits_i9xx_sdvo = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1400000, .max = 2800000 },
- .n = { .min = 1, .max = 6 },
- .m = { .min = 70, .max = 120 },
- .m1 = { .min = 8, .max = 18 },
- .m2 = { .min = 3, .max = 7 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 200000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit intel_limits_i9xx_lvds = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1400000, .max = 2800000 },
- .n = { .min = 1, .max = 6 },
- .m = { .min = 70, .max = 120 },
- .m1 = { .min = 8, .max = 18 },
- .m2 = { .min = 3, .max = 7 },
- .p = { .min = 7, .max = 98 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 112000,
- .p2_slow = 14, .p2_fast = 7 },
-};
-
-
-static const struct intel_limit intel_limits_g4x_sdvo = {
- .dot = { .min = 25000, .max = 270000 },
- .vco = { .min = 1750000, .max = 3500000},
- .n = { .min = 1, .max = 4 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 17, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 10, .max = 30 },
- .p1 = { .min = 1, .max = 3},
- .p2 = { .dot_limit = 270000,
- .p2_slow = 10,
- .p2_fast = 10
- },
-};
-
-static const struct intel_limit intel_limits_g4x_hdmi = {
- .dot = { .min = 22000, .max = 400000 },
- .vco = { .min = 1750000, .max = 3500000},
- .n = { .min = 1, .max = 4 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 16, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8},
- .p2 = { .dot_limit = 165000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
- .dot = { .min = 20000, .max = 115000 },
- .vco = { .min = 1750000, .max = 3500000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 17, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 28, .max = 112 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 0,
- .p2_slow = 14, .p2_fast = 14
- },
-};
-
-static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
- .dot = { .min = 80000, .max = 224000 },
- .vco = { .min = 1750000, .max = 3500000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 17, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 14, .max = 42 },
- .p1 = { .min = 2, .max = 6 },
- .p2 = { .dot_limit = 0,
- .p2_slow = 7, .p2_fast = 7
- },
-};
-
-static const struct intel_limit pnv_limits_sdvo = {
- .dot = { .min = 20000, .max = 400000},
- .vco = { .min = 1700000, .max = 3500000 },
- /* Pineview's Ncounter is a ring counter */
- .n = { .min = 3, .max = 6 },
- .m = { .min = 2, .max = 256 },
- /* Pineview only has one combined m divider, which we treat as m2. */
- .m1 = { .min = 0, .max = 0 },
- .m2 = { .min = 0, .max = 254 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 200000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit pnv_limits_lvds = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1700000, .max = 3500000 },
- .n = { .min = 3, .max = 6 },
- .m = { .min = 2, .max = 256 },
- .m1 = { .min = 0, .max = 0 },
- .m2 = { .min = 0, .max = 254 },
- .p = { .min = 7, .max = 112 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 112000,
- .p2_slow = 14, .p2_fast = 14 },
-};
-
-/* Ironlake / Sandybridge
- *
- * We calculate clock using (register_value + 2) for N/M1/M2, so here
- * the range value for them is (actual_value - 2).
- */
-static const struct intel_limit ilk_limits_dac = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 5 },
- .m = { .min = 79, .max = 127 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit ilk_limits_single_lvds = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 79, .max = 118 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 28, .max = 112 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 14, .p2_fast = 14 },
-};
-
-static const struct intel_limit ilk_limits_dual_lvds = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 79, .max = 127 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 14, .max = 56 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 7, .p2_fast = 7 },
-};
-
-/* LVDS 100mhz refclk limits. */
-static const struct intel_limit ilk_limits_single_lvds_100m = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 2 },
- .m = { .min = 79, .max = 126 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 28, .max = 112 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 14, .p2_fast = 14 },
-};
-
-static const struct intel_limit ilk_limits_dual_lvds_100m = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 79, .max = 126 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 14, .max = 42 },
- .p1 = { .min = 2, .max = 6 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 7, .p2_fast = 7 },
-};
-
-static const struct intel_limit intel_limits_vlv = {
- /*
- * These are the data rate limits (measured in fast clocks)
- * since those are the strictest limits we have. The fast
- * clock and actual rate limits are more relaxed, so checking
- * them would make no difference.
- */
- .dot = { .min = 25000 * 5, .max = 270000 * 5 },
- .vco = { .min = 4000000, .max = 6000000 },
- .n = { .min = 1, .max = 7 },
- .m1 = { .min = 2, .max = 3 },
- .m2 = { .min = 11, .max = 156 },
- .p1 = { .min = 2, .max = 3 },
- .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
-};
-
-static const struct intel_limit intel_limits_chv = {
- /*
- * These are the data rate limits (measured in fast clocks)
- * since those are the strictest limits we have. The fast
- * clock and actual rate limits are more relaxed, so checking
- * them would make no difference.
- */
- .dot = { .min = 25000 * 5, .max = 540000 * 5},
- .vco = { .min = 4800000, .max = 6480000 },
- .n = { .min = 1, .max = 1 },
- .m1 = { .min = 2, .max = 2 },
- .m2 = { .min = 24 << 22, .max = 175 << 22 },
- .p1 = { .min = 2, .max = 4 },
- .p2 = { .p2_slow = 1, .p2_fast = 14 },
-};
-
-static const struct intel_limit intel_limits_bxt = {
- /* FIXME: find real dot limits */
- .dot = { .min = 0, .max = INT_MAX },
- .vco = { .min = 4800000, .max = 6700000 },
- .n = { .min = 1, .max = 1 },
- .m1 = { .min = 2, .max = 2 },
- /* FIXME: find real m2 limits */
- .m2 = { .min = 2 << 22, .max = 255 << 22 },
- .p1 = { .min = 2, .max = 4 },
- .p2 = { .p2_slow = 1, .p2_fast = 20 },
-};
-
/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
@@ -542,12 +204,6 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
}
static bool
-needs_modeset(const struct intel_crtc_state *state)
-{
- return drm_atomic_crtc_needs_modeset(&state->uapi);
-}
-
-static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
return crtc_state->master_transcoder != INVALID_TRANSCODER;
@@ -566,482 +222,6 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
is_trans_port_sync_slave(crtc_state);
}
-/*
- * Platform specific helpers to calculate the port PLL loopback- (clock.m),
- * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
- * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
- * The helpers' return value is the rate of the clock that is fed to the
- * display engine's pipe which can be the above fast dot clock rate or a
- * divided-down version of it.
- */
-/* m1 is reserved as 0 in Pineview, n is a ring counter */
-static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = clock->m2 + 2;
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot;
-}
-
-static u32 i9xx_dpll_compute_m(struct dpll *dpll)
-{
- return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
-}
-
-static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = i9xx_dpll_compute_m(clock);
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot;
-}
-
-static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = clock->m1 * clock->m2;
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot / 5;
-}
-
-int chv_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = clock->m1 * clock->m2;
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
- clock->n << 22);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot / 5;
-}
-
-/*
- * Returns whether the given set of divisors are valid for a given refclk with
- * the given connectors.
- */
-static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
- const struct intel_limit *limit,
- const struct dpll *clock)
-{
- if (clock->n < limit->n.min || limit->n.max < clock->n)
- return false;
- if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
- return false;
- if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
- return false;
- if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
- return false;
-
- if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
- if (clock->m1 <= clock->m2)
- return false;
-
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_GEN9_LP(dev_priv)) {
- if (clock->p < limit->p.min || limit->p.max < clock->p)
- return false;
- if (clock->m < limit->m.min || limit->m.max < clock->m)
- return false;
- }
-
- if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
- return false;
- /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
- * connector, etc., rather than just a single range.
- */
- if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
- return false;
-
- return true;
-}
-
-static int
-i9xx_select_p2_div(const struct intel_limit *limit,
- const struct intel_crtc_state *crtc_state,
- int target)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- /*
- * For LVDS just rely on its current settings for dual-channel.
- * We haven't figured out how to reliably set up different
- * single/dual channel state, if we even can.
- */
- if (intel_is_dual_link_lvds(dev_priv))
- return limit->p2.p2_fast;
- else
- return limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- return limit->p2.p2_slow;
- else
- return limit->p2.p2_fast;
- }
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-i9xx_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
- struct dpll clock;
- int err = target;
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
- clock.m1++) {
- for (clock.m2 = limit->m2.min;
- clock.m2 <= limit->m2.max; clock.m2++) {
- if (clock.m2 >= clock.m1)
- break;
- for (clock.n = limit->n.min;
- clock.n <= limit->n.max; clock.n++) {
- for (clock.p1 = limit->p1.min;
- clock.p1 <= limit->p1.max; clock.p1++) {
- int this_err;
-
- i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
- if (match_clock &&
- clock.p != match_clock->p)
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- }
- }
- }
- }
- }
-
- return (err != target);
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-pnv_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
- struct dpll clock;
- int err = target;
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
- clock.m1++) {
- for (clock.m2 = limit->m2.min;
- clock.m2 <= limit->m2.max; clock.m2++) {
- for (clock.n = limit->n.min;
- clock.n <= limit->n.max; clock.n++) {
- for (clock.p1 = limit->p1.min;
- clock.p1 <= limit->p1.max; clock.p1++) {
- int this_err;
-
- pnv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
- if (match_clock &&
- clock.p != match_clock->p)
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- }
- }
- }
- }
- }
-
- return (err != target);
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-g4x_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
- struct dpll clock;
- int max_n;
- bool found = false;
- /* approximately equals target * 0.00585 */
- int err_most = (target >> 8) + (target >> 9);
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
- max_n = limit->n.max;
- /* based on hardware requirement, prefer smaller n to precision */
- for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- /* based on hardware requirement, prefere larger m1,m2 */
- for (clock.m1 = limit->m1.max;
- clock.m1 >= limit->m1.min; clock.m1--) {
- for (clock.m2 = limit->m2.max;
- clock.m2 >= limit->m2.min; clock.m2--) {
- for (clock.p1 = limit->p1.max;
- clock.p1 >= limit->p1.min; clock.p1--) {
- int this_err;
-
- i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err_most) {
- *best_clock = clock;
- err_most = this_err;
- max_n = clock.n;
- found = true;
- }
- }
- }
- }
- }
- return found;
-}
-
-/*
- * Check if the calculated PLL configuration is more optimal compared to the
- * best configuration and error found so far. Return the calculated error.
- */
-static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
- const struct dpll *calculated_clock,
- const struct dpll *best_clock,
- unsigned int best_error_ppm,
- unsigned int *error_ppm)
-{
- /*
- * For CHV ignore the error and consider only the P value.
- * Prefer a bigger P value based on HW requirements.
- */
- if (IS_CHERRYVIEW(to_i915(dev))) {
- *error_ppm = 0;
-
- return calculated_clock->p > best_clock->p;
- }
-
- if (drm_WARN_ON_ONCE(dev, !target_freq))
- return false;
-
- *error_ppm = div_u64(1000000ULL *
- abs(target_freq - calculated_clock->dot),
- target_freq);
- /*
- * Prefer a better P value over a better (smaller) error if the error
- * is small. Ensure this preference for future configurations too by
- * setting the error to 0.
- */
- if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
- *error_ppm = 0;
-
- return true;
- }
-
- return *error_ppm + 10 < best_error_ppm;
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool
-vlv_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct dpll clock;
- unsigned int bestppm = 1000000;
- /* min update 19.2 MHz */
- int max_n = min(limit->n.max, refclk / 19200);
- bool found = false;
-
- target *= 5; /* fast clock */
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- /* based on hardware requirement, prefer smaller n to precision */
- for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
- for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
- clock.p2 -= clock.p2 > 10 ? 2 : 1) {
- clock.p = clock.p1 * clock.p2;
- /* based on hardware requirement, prefer bigger m1,m2 values */
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
- unsigned int ppm;
-
- clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
- refclk * clock.m1);
-
- vlv_calc_dpll_params(refclk, &clock);
-
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
-
- if (!vlv_PLL_is_optimal(dev, target,
- &clock,
- best_clock,
- bestppm, &ppm))
- continue;
-
- *best_clock = clock;
- bestppm = ppm;
- found = true;
- }
- }
- }
- }
-
- return found;
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool
-chv_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- unsigned int best_error_ppm;
- struct dpll clock;
- u64 m2;
- int found = false;
-
- memset(best_clock, 0, sizeof(*best_clock));
- best_error_ppm = 1000000;
-
- /*
- * Based on hardware doc, the n always set to 1, and m1 always
- * set to 2. If requires to support 200Mhz refclk, we need to
- * revisit this because n may not 1 anymore.
- */
- clock.n = 1, clock.m1 = 2;
- target *= 5; /* fast clock */
-
- for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
- for (clock.p2 = limit->p2.p2_fast;
- clock.p2 >= limit->p2.p2_slow;
- clock.p2 -= clock.p2 > 10 ? 2 : 1) {
- unsigned int error_ppm;
-
- clock.p = clock.p1 * clock.p2;
-
- m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
- refclk * clock.m1);
-
- if (m2 > INT_MAX/clock.m1)
- continue;
-
- clock.m2 = m2;
-
- chv_calc_dpll_params(refclk, &clock);
-
- if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
- continue;
-
- if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
- best_error_ppm, &error_ppm))
- continue;
-
- *best_clock = clock;
- best_error_ppm = error_ppm;
- found = true;
- }
- }
-
- return found;
-}
-
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
- struct dpll *best_clock)
-{
- int refclk = 100000;
- const struct intel_limit *limit = &intel_limits_bxt;
-
- return chv_find_best_dpll(limit, crtc_state,
- crtc_state->port_clock, refclk,
- NULL, best_clock);
-}
-
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -1315,12 +495,6 @@ static void assert_planes_disabled(struct intel_crtc *crtc)
assert_plane_disabled(plane);
}
-static void assert_vblank_disabled(struct drm_crtc *crtc)
-{
- if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
- drm_crtc_vblank_put(crtc);
-}
-
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -1672,7 +846,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
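
This and the following hunks switch the frame start delay from a hard-coded 0 to the value cached in dev_priv, with the register field holding the delay minus one. A generic read-modify-write sketch of such a field; the mask and shift are placeholders, not the real TRANS_CHICKEN2 layout:

#include <stdint.h>

#define DELAY_FIELD_SHIFT	27				/* placeholder */
#define DELAY_FIELD_MASK	(3u << DELAY_FIELD_SHIFT)	/* placeholder */

/* Program a 1-based delay into a 0-based two-bit register field. */
static uint32_t set_frame_start_delay(uint32_t val, unsigned int delay)
{
	val &= ~DELAY_FIELD_MASK;
	val |= ((delay - 1) << DELAY_FIELD_SHIFT) & DELAY_FIELD_MASK;
	return val;
}
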
@@ -1683,7 +857,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
if (HAS_PCH_IBX(dev_priv)) {
/* Configure frame start delay to match the CPU */
val &= ~TRANS_FRAME_START_DELAY_MASK;
- val |= TRANS_FRAME_START_DELAY(0);
+ val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
/*
* Make the BPC in transcoder be consistent with
@@ -1728,7 +902,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
@@ -1805,55 +979,6 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
return crtc->pipe;
}
-static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 mode_flags = crtc->mode_flags;
-
- /*
- * From Gen 11, in DSI command mode the frame counter won't have
- * been updated at the beginning of TE. If we used the hw counter
- * we would only see it update at the next TE, hence switch to the
- * sw counter.
- */
- if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
- return 0;
-
- /*
- * On i965gm the hardware frame counter reads
- * zero when the TV encoder is enabled :(
- */
- if (IS_I965GM(dev_priv) &&
- (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
- return 0;
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- return 0xffffffff; /* full 32 bit counter */
- else if (INTEL_GEN(dev_priv) >= 3)
- return 0xffffff; /* only 24 bits of frame count */
- else
- return 0; /* Gen2 doesn't have a hardware frame counter */
-}
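
The value returned above is used by drm as a wrap mask: the usable delta between two reads of the hardware counter is taken modulo the reported width, and 0 means the hw counter is unusable so vblanks are counted in software. A rough sketch of that wrap handling, not the drm core code:

#include <stdint.h>

/* max_count is the mask returned above: 0xffffff for a 24-bit counter,
 * 0xffffffff for a full 32-bit one. */
static uint32_t vblank_delta(uint32_t cur, uint32_t last, uint32_t max_count)
{
	return (cur - last) & max_count;
}
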
-
-void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
- assert_vblank_disabled(&crtc->base);
- drm_crtc_set_max_vblank_count(&crtc->base,
- intel_crtc_max_vblank_count(crtc_state));
- drm_crtc_vblank_on(&crtc->base);
-}
-
-void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
- drm_crtc_vblank_off(&crtc->base);
- assert_vblank_disabled(&crtc->base);
-}
-
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
@@ -1968,8 +1093,8 @@ static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
static bool is_gen12_ccs_modifier(u64 modifier)
{
return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
-
}
static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
@@ -1977,6 +1102,12 @@ static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
}
+static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane)
+{
+ return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC &&
+ plane == 2;
+}
+
static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
{
if (is_ccs_modifier(fb->modifier))
@@ -1998,6 +1129,9 @@ static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
ccs_plane < fb->format->num_planes / 2);
+ if (is_gen12_ccs_cc_plane(fb, ccs_plane))
+ return 0;
+
return ccs_plane - fb->format->num_planes / 2;
}
@@ -2016,7 +1150,7 @@ int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
- uint64_t modifier)
+ u64 modifier)
{
return info->is_yuv &&
info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
@@ -2048,6 +1182,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 128;
fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
if (is_ccs_plane(fb, color_plane))
return 64;
@@ -2182,6 +1317,11 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr
return 0;
}
+static bool has_async_flips(struct drm_i915_private *i915)
+{
+ return INTEL_GEN(i915) >= 5;
+}
+
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
int color_plane)
{
@@ -2196,7 +1336,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
case DRM_FORMAT_MOD_LINEAR:
return intel_linear_alignment(dev_priv);
case I915_FORMAT_MOD_X_TILED:
- if (INTEL_GEN(dev_priv) >= 9)
+ if (has_async_flips(dev_priv))
return 256 * 1024;
return 0;
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
@@ -2204,6 +1344,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
return intel_tile_row_size(fb, color_plane);
fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
return 16 * 1024;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
@@ -2309,7 +1450,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
*/
ret = i915_vma_pin_fence(vma);
if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
vma = ERR_PTR(ret);
goto err;
}
@@ -2327,12 +1468,9 @@ err:
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
- i915_gem_object_lock(vma->obj, NULL);
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
- i915_gem_object_unpin_from_display_plane(vma);
- i915_gem_object_unlock(vma->obj);
-
+ i915_vma_unpin(vma);
i915_vma_put(vma);
}
@@ -2532,9 +1670,9 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
return offset_aligned;
}
-static u32 intel_plane_compute_aligned_offset(int *x, int *y,
- const struct intel_plane_state *state,
- int color_plane)
+u32 intel_plane_compute_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane)
{
struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
@@ -2608,6 +1746,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
return I915_TILING_Y;
default:
@@ -2686,6 +1825,25 @@ static const struct drm_format_info gen12_ccs_formats[] = {
.hsub = 2, .vsub = 2, .is_yuv = true },
};
+/*
+ * Same as gen12_ccs_formats[] above, but with an additional surface used
+ * to pass Clear Color information in plane 2 as 64 bits of data.
+ */
+static const struct drm_format_info gen12_ccs_cc_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+};
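
The char_per_block/block_w/block_h columns above determine each colour plane's layout; plane 2 uses char_per_block 0 because it is read by the driver rather than scanned out. A stand-alone sketch of how such a descriptor turns into a plane's pitch and row count, with the struct and names invented for the example:

struct fmt_plane {
	int char_per_block;	/* bytes per block of pixels */
	int block_w, block_h;	/* block size in pixels */
};

/* Pitch in bytes and number of rows needed for a fb_w x fb_h plane,
 * rounding partial blocks up. */
static void plane_dims(const struct fmt_plane *p, int fb_w, int fb_h,
		       int *pitch_bytes, int *rows)
{
	*pitch_bytes = (fb_w + p->block_w - 1) / p->block_w * p->char_per_block;
	*rows = (fb_h + p->block_h - 1) / p->block_h;
}
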
+
static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
int num_formats, u32 format)
@@ -2714,6 +1872,10 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
return lookup_format_info(gen12_ccs_formats,
ARRAY_SIZE(gen12_ccs_formats),
cmd->pixel_format);
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ return lookup_format_info(gen12_ccs_cc_formats,
+ ARRAY_SIZE(gen12_ccs_cc_formats),
+ cmd->pixel_format);
default:
return NULL;
}
@@ -2722,6 +1884,7 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
bool is_ccs_modifier(u64 modifier)
{
return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
@@ -2940,7 +2103,7 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
int ccs_x, ccs_y;
int main_x, main_y;
- if (!is_ccs_plane(fb, ccs_plane))
+ if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
return 0;
intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
@@ -3067,6 +2230,18 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
int x, y;
int ret;
+ /*
+ * Plane 2 of Render Compression with Clear Color fb modifier
+ * is consumed by the driver and not passed to DE. Skip the
+ * arithmetic related to alignment and offset calculation.
+ */
+ if (is_gen12_ccs_cc_plane(fb, i)) {
+ if (IS_ALIGNED(fb->offsets[i], PAGE_SIZE))
+ continue;
+ else
+ return -EINVAL;
+ }
+
cpp = fb->format->cpp[i];
intel_fb_plane_dims(&width, &height, fb, i);
@@ -3271,7 +2446,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
}
}
-static int
+int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
const struct intel_framebuffer *fb =
@@ -3551,7 +2726,7 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}
-static void fixup_active_planes(struct intel_crtc_state *crtc_state)
+static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct drm_plane *plane;
@@ -3561,11 +2736,14 @@ static void fixup_active_planes(struct intel_crtc_state *crtc_state)
* have been used on the same (or wrong) pipe. plane_mask uses
* unique ids, hence we can use that to reconstruct active_planes.
*/
+ crtc_state->enabled_planes = 0;
crtc_state->active_planes = 0;
drm_for_each_plane_mask(plane, &dev_priv->drm,
- crtc_state->uapi.plane_mask)
+ crtc_state->uapi.plane_mask) {
+ crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
+ }
}
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
@@ -3583,7 +2761,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
crtc->base.base.id, crtc->base.name);
intel_set_plane_visible(crtc_state, plane_state, false);
- fixup_active_planes(crtc_state);
+ fixup_plane_bitmasks(crtc_state);
crtc_state->data_rate[plane->id] = 0;
crtc_state->min_cdclk[plane->id] = 0;
@@ -3613,12 +2791,6 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_disable_plane(plane, crtc_state);
}
-static struct intel_frontbuffer *
-to_intel_frontbuffer(struct drm_framebuffer *fb)
-{
- return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
-}
-
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config)
@@ -3817,33 +2989,19 @@ static int intel_plane_max_height(struct intel_plane *plane,
return INT_MAX;
}
-static int skl_check_main_surface(struct intel_plane_state *plane_state)
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+ int *x, int *y, u32 *offset)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- int x = plane_state->uapi.src.x1 >> 16;
- int y = plane_state->uapi.src.y1 >> 16;
- int w = drm_rect_width(&plane_state->uapi.src) >> 16;
- int h = drm_rect_height(&plane_state->uapi.src) >> 16;
- int min_width = intel_plane_min_width(plane, fb, 0, rotation);
- int max_width = intel_plane_max_width(plane, fb, 0, rotation);
- int max_height = intel_plane_max_height(plane, fb, 0, rotation);
- int aux_plane = intel_main_to_aux_plane(fb, 0);
- u32 aux_offset = plane_state->color_plane[aux_plane].offset;
- u32 alignment, offset;
+ const int aux_plane = intel_main_to_aux_plane(fb, 0);
+ const u32 aux_offset = plane_state->color_plane[aux_plane].offset;
+ const u32 alignment = intel_surf_alignment(fb, 0);
+ const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
- if (w > max_width || w < min_width || h > max_height) {
- drm_dbg_kms(&dev_priv->drm,
- "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
- w, h, min_width, max_width, max_height);
- return -EINVAL;
- }
-
- intel_add_fb_offsets(&x, &y, plane_state, 0);
- offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
- alignment = intel_surf_alignment(fb, 0);
+ intel_add_fb_offsets(x, y, plane_state, 0);
+ *offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0);
if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
return -EINVAL;
@@ -3852,9 +3010,10 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* main surface offset, and it must be non-negative. Make
* sure that is what we will get.
*/
- if (aux_plane && offset > aux_offset)
- offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
- offset, aux_offset & ~(alignment - 1));
+ if (aux_plane && *offset > aux_offset)
+ *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+ *offset,
+ aux_offset & ~(alignment - 1));
/*
* When using an X-tiled surface, the plane blows up
@@ -3865,18 +3024,51 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
int cpp = fb->format->cpp[0];
- while ((x + w) * cpp > plane_state->color_plane[0].stride) {
- if (offset == 0) {
+ while ((*x + w) * cpp > plane_state->color_plane[0].stride) {
+ if (*offset == 0) {
drm_dbg_kms(&dev_priv->drm,
"Unable to find suitable display surface offset due to X-tiling\n");
return -EINVAL;
}
- offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
- offset, offset - alignment);
+ *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+ *offset,
+ *offset - alignment);
}
}
+ return 0;
+}
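
When the main surface offset has to be lowered, either to stay below the AUX offset or to satisfy the X-tile stride check above, the bytes it no longer covers are re-expressed as larger x/y offsets. A linear-layout sketch of that bookkeeping; the driver's intel_plane_adjust_aligned_offset() does the tiled equivalent:

/* Assumes a linear layout where byte address = offset + y * stride + x * cpp,
 * and that (old_offset - new_offset) is a multiple of cpp within a row. */
static unsigned int adjust_offset_linear(int *x, int *y,
					 unsigned int old_offset,
					 unsigned int new_offset,
					 unsigned int stride, int cpp)
{
	unsigned int delta = old_offset - new_offset;

	*y += delta / stride;
	*x += (delta % stride) / cpp;

	return new_offset;
}
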
+
+static int skl_check_main_surface(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const unsigned int rotation = plane_state->hw.rotation;
+ int x = plane_state->uapi.src.x1 >> 16;
+ int y = plane_state->uapi.src.y1 >> 16;
+ const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ const int h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ const int min_width = intel_plane_min_width(plane, fb, 0, rotation);
+ const int max_width = intel_plane_max_width(plane, fb, 0, rotation);
+ const int max_height = intel_plane_max_height(plane, fb, 0, rotation);
+ const int aux_plane = intel_main_to_aux_plane(fb, 0);
+ const u32 alignment = intel_surf_alignment(fb, 0);
+ u32 offset;
+ int ret;
+
+ if (w > max_width || w < min_width || h > max_height) {
+ drm_dbg_kms(&dev_priv->drm,
+ "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
+ w, h, min_width, max_width, max_height);
+ return -EINVAL;
+ }
+
+ ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
+ if (ret)
+ return ret;
+
/*
* CCS AUX surface doesn't have its own x/y offsets, we must make sure
* they match with the main surface x/y offsets.
@@ -3899,6 +3091,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
}
}
+ drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191);
+
plane_state->color_plane[0].offset = offset;
plane_state->color_plane[0].x = x;
plane_state->color_plane[0].y = y;
@@ -3971,6 +3165,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
}
}
+ drm_WARN_ON(&i915->drm, x > 8191 || y > 8191);
+
plane_state->color_plane[uv_plane].offset = offset;
plane_state->color_plane[uv_plane].x = x;
plane_state->color_plane[uv_plane].y = y;
@@ -3991,7 +3187,8 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
int hsub, vsub;
int x, y;
- if (!is_ccs_plane(fb, ccs_plane))
+ if (!is_ccs_plane(fb, ccs_plane) ||
+ is_gen12_ccs_cc_plane(fb, ccs_plane))
continue;
intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
@@ -4063,422 +3260,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
-static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- unsigned int *num, unsigned int *den)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int cpp = fb->format->cpp[0];
-
- /*
- * g4x bspec says 64bpp pixel rate can't exceed 80%
- * of cdclk when the sprite plane is enabled on the
- * same pipe. ilk/snb bspec says 64bpp pixel rate is
- * never allowed to exceed 80% of cdclk. Let's just go
- * with the ilk/snb limit always.
- */
- if (cpp == 8) {
- *num = 10;
- *den = 8;
- } else {
- *num = 1;
- *den = 1;
- }
-}
-
-static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- unsigned int pixel_rate;
- unsigned int num, den;
-
- /*
- * Note that crtc_state->pixel_rate accounts for both
- * horizontal and vertical panel fitter downscaling factors.
- * Pre-HSW bspec tells us to only consider the horizontal
- * downscaling factor here. We ignore that and just consider
- * both for simplicity.
- */
- pixel_rate = crtc_state->pixel_rate;
-
- i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
-
- /* two pixels per clock with double wide pipe */
- if (crtc_state->double_wide)
- den *= 2;
-
- return DIV_ROUND_UP(pixel_rate * num, den);
-}
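
Worked example of the ratio above: at 64bpp the plane may only use 8/10 of cdclk, so the minimum cdclk is pixel_rate * 10 / 8, and a double wide pipe halves the requirement. A standalone restatement of the removed helper:

/* e.g. 148500 kHz pixel rate at 64bpp -> 185625 kHz minimum cdclk */
static unsigned int i9xx_min_cdclk_khz(unsigned int pixel_rate_khz,
				       int cpp, int double_wide)
{
	unsigned int num = (cpp == 8) ? 10 : 1;
	unsigned int den = (cpp == 8) ? 8 : 1;

	if (double_wide)
		den *= 2;

	return (pixel_rate_khz * num + den - 1) / den;	/* DIV_ROUND_UP */
}
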
-
-unsigned int
-i9xx_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-
- if (!HAS_GMCH(dev_priv)) {
- return 32*1024;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- if (modifier == I915_FORMAT_MOD_X_TILED)
- return 16*1024;
- else
- return 32*1024;
- } else if (INTEL_GEN(dev_priv) >= 3) {
- if (modifier == I915_FORMAT_MOD_X_TILED)
- return 8*1024;
- else
- return 16*1024;
- } else {
- if (plane->i9xx_plane == PLANE_C)
- return 4*1024;
- else
- return 8*1024;
- }
-}
-
-static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dspcntr = 0;
-
- if (crtc_state->gamma_enable)
- dspcntr |= DISPPLANE_GAMMA_ENABLE;
-
- if (crtc_state->csc_enable)
- dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
-
- if (INTEL_GEN(dev_priv) < 5)
- dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
-
- return dspcntr;
-}
-
-static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- u32 dspcntr;
-
- dspcntr = DISPLAY_PLANE_ENABLE;
-
- if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
- IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
- switch (fb->format->format) {
- case DRM_FORMAT_C8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case DRM_FORMAT_XRGB1555:
- dspcntr |= DISPPLANE_BGRX555;
- break;
- case DRM_FORMAT_ARGB1555:
- dspcntr |= DISPPLANE_BGRA555;
- break;
- case DRM_FORMAT_RGB565:
- dspcntr |= DISPPLANE_BGRX565;
- break;
- case DRM_FORMAT_XRGB8888:
- dspcntr |= DISPPLANE_BGRX888;
- break;
- case DRM_FORMAT_XBGR8888:
- dspcntr |= DISPPLANE_RGBX888;
- break;
- case DRM_FORMAT_ARGB8888:
- dspcntr |= DISPPLANE_BGRA888;
- break;
- case DRM_FORMAT_ABGR8888:
- dspcntr |= DISPPLANE_RGBA888;
- break;
- case DRM_FORMAT_XRGB2101010:
- dspcntr |= DISPPLANE_BGRX101010;
- break;
- case DRM_FORMAT_XBGR2101010:
- dspcntr |= DISPPLANE_RGBX101010;
- break;
- case DRM_FORMAT_ARGB2101010:
- dspcntr |= DISPPLANE_BGRA101010;
- break;
- case DRM_FORMAT_ABGR2101010:
- dspcntr |= DISPPLANE_RGBA101010;
- break;
- case DRM_FORMAT_XBGR16161616F:
- dspcntr |= DISPPLANE_RGBX161616;
- break;
- default:
- MISSING_CASE(fb->format->format);
- return 0;
- }
-
- if (INTEL_GEN(dev_priv) >= 4 &&
- fb->modifier == I915_FORMAT_MOD_X_TILED)
- dspcntr |= DISPPLANE_TILED;
-
- if (rotation & DRM_MODE_ROTATE_180)
- dspcntr |= DISPPLANE_ROTATE_180;
-
- if (rotation & DRM_MODE_REFLECT_X)
- dspcntr |= DISPPLANE_MIRROR;
-
- return dspcntr;
-}
-
-int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int src_x, src_y, src_w;
- u32 offset;
- int ret;
-
- ret = intel_plane_compute_gtt(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- src_x = plane_state->uapi.src.x1 >> 16;
- src_y = plane_state->uapi.src.y1 >> 16;
-
- /* Undocumented hardware limit on i965/g4x/vlv/chv */
- if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
- return -EINVAL;
-
- intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
-
- if (INTEL_GEN(dev_priv) >= 4)
- offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
- plane_state, 0);
- else
- offset = 0;
-
- /*
- * Put the final coordinates back so that the src
- * coordinate checks will see the right values.
- */
- drm_rect_translate_to(&plane_state->uapi.src,
- src_x << 16, src_y << 16);
-
- /* HSW/BDW do this automagically in hardware */
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
- unsigned int rotation = plane_state->hw.rotation;
- int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
-
- if (rotation & DRM_MODE_ROTATE_180) {
- src_x += src_w - 1;
- src_y += src_h - 1;
- } else if (rotation & DRM_MODE_REFLECT_X) {
- src_x += src_w - 1;
- }
- }
-
- plane_state->color_plane[0].offset = offset;
- plane_state->color_plane[0].x = src_x;
- plane_state->color_plane[0].y = src_y;
-
- return 0;
-}
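
The rotation fixup above exists because pre-HSW hardware scans a 180-degree rotated (or X-mirrored) plane from the opposite edge of the source rectangle, so the programmed start coordinate has to move there; HSW/BDW derive this themselves. A sketch of just that adjustment:

static void rotate_180_start(int *src_x, int *src_y, int src_w, int src_h)
{
	/* start scanning from the bottom-right pixel of the source */
	*src_x += src_w - 1;
	*src_y += src_h - 1;
}
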
-
-static bool i9xx_plane_has_windowing(struct intel_plane *plane)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-
- if (IS_CHERRYVIEW(dev_priv))
- return i9xx_plane == PLANE_B;
- else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- return false;
- else if (IS_GEN(dev_priv, 4))
- return i9xx_plane == PLANE_C;
- else
- return i9xx_plane == PLANE_B ||
- i9xx_plane == PLANE_C;
-}
-
-static int
-i9xx_plane_check(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- int ret;
-
- ret = chv_plane_check_rotation(plane_state);
- if (ret)
- return ret;
-
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- i9xx_plane_has_windowing(plane));
- if (ret)
- return ret;
-
- ret = i9xx_check_plane_surface(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- ret = intel_plane_check_src_coordinates(plane_state);
- if (ret)
- return ret;
-
- plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
-
- return 0;
-}
-
-static void i9xx_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- u32 linear_offset;
- int x = plane_state->color_plane[0].x;
- int y = plane_state->color_plane[0].y;
- int crtc_x = plane_state->uapi.dst.x1;
- int crtc_y = plane_state->uapi.dst.y1;
- int crtc_w = drm_rect_width(&plane_state->uapi.dst);
- int crtc_h = drm_rect_height(&plane_state->uapi.dst);
- unsigned long irqflags;
- u32 dspaddr_offset;
- u32 dspcntr;
-
- dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
-
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
- if (INTEL_GEN(dev_priv) >= 4)
- dspaddr_offset = plane_state->color_plane[0].offset;
- else
- dspaddr_offset = linear_offset;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
- plane_state->color_plane[0].stride);
-
- if (INTEL_GEN(dev_priv) < 4) {
- /*
- * PLANE_A doesn't actually have a full window
- * generator but let's assume we still need to
- * program whatever is there.
- */
- intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
- (crtc_y << 16) | crtc_x);
- intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
- ((crtc_h - 1) << 16) | (crtc_w - 1));
- } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
- intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
- (crtc_y << 16) | crtc_x);
- intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
- ((crtc_h - 1) << 16) | (crtc_w - 1));
- intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
- }
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
- (y << 16) | x);
- } else if (INTEL_GEN(dev_priv) >= 4) {
- intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
- linear_offset);
- intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
- (y << 16) | x);
- }
-
- /*
- * The control register self-arms if the plane was previously
- * disabled. Try to make the plane enable atomic by writing
- * the control register just before the surface register.
- */
- intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
- if (INTEL_GEN(dev_priv) >= 4)
- intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
- else
- intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void i9xx_disable_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- unsigned long irqflags;
- u32 dspcntr;
-
- /*
- * DSPCNTR pipe gamma enable on g4x+ and pipe csc
- * enable on ilk+ affect the pipe bottom color as
- * well, so we must configure them even if the plane
- * is disabled.
- *
- * On pre-g4x there is no way to gamma correct the
- * pipe bottom color but we'll keep on doing this
- * anyway so that the crtc state readout works correctly.
- */
- dspcntr = i9xx_plane_ctl_crtc(crtc_state);
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
- if (INTEL_GEN(dev_priv) >= 4)
- intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
- else
- intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum intel_display_power_domain power_domain;
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- intel_wakeref_t wakeref;
- bool ret;
- u32 val;
-
- /*
- * Not 100% correct for planes that can move between pipes,
- * but that's only the case for gen2-4 which don't have any
- * display power wells.
- */
- power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return false;
-
- val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
-
- ret = val & DISPLAY_PLANE_ENABLE;
-
- if (INTEL_GEN(dev_priv) >= 5)
- *pipe = plane->pipe;
- else
- *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
- DISPPLANE_SEL_PIPE_SHIFT;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return ret;
-}
-
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
@@ -4647,6 +3428,7 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier)
case I915_FORMAT_MOD_Y_TILED:
return PLANE_CTL_TILED_Y;
case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
return PLANE_CTL_TILED_Y |
@@ -4707,9 +3489,6 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 plane_ctl = 0;
- if (crtc_state->uapi.async_flip)
- plane_ctl |= PLANE_CTL_ASYNC_FLIP;
-
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return plane_ctl;
@@ -4807,6 +3586,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
} else if (fb->format->is_yuv) {
plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
}
return plane_color_ctl;
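
The added DISABLE bit skips the limited-to-full range expansion when the framebuffer is already tagged as full range. For reference, the expansion being skipped for 8-bit luma is (y - 16) * 255 / 219; a tiny sketch, not driver code:

/* Limited-range ("video") 8-bit luma, 16..235, expanded to full range. */
static int expand_limited_luma_8bit(int y)
{
	return (y - 16) * 255 / 219;	/* 16 -> 0, 235 -> 255 */
}
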
@@ -4996,532 +3777,6 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
-static void intel_fdi_normal_train(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* enable normal train */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (IS_IVYBRIDGE(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_NONE_IVB;
- temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
- }
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_NORMAL_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_NONE;
- }
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
- /* wait one idle pattern time */
- intel_de_posting_read(dev_priv, reg);
- udelay(1000);
-
- /* IVB wants error correction enabled */
- if (IS_IVYBRIDGE(dev_priv))
- intel_de_write(dev_priv, reg,
- intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
-}
-
-/* The FDI link training functions for ILK/Ibexpeak. */
-static void ilk_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp, tries;
-
- /* FDI needs bits from pipe first */
- assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
-
- /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
- for the train result */
- reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
- intel_de_read(dev_priv, reg);
- udelay(150);
-
- /* enable CPU FDI TX and PCH FDI RX */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- /* Ironlake workaround, enable clock pointer after FDI enable */
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
- FDI_RX_PHASE_SYNC_POINTER_OVR);
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
- FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
-
- reg = FDI_RX_IIR(pipe);
- for (tries = 0; tries < 5; tries++) {
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if ((temp & FDI_RX_BIT_LOCK)) {
- drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
- intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
- break;
- }
- }
- if (tries == 5)
- drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
-
- /* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- reg = FDI_RX_IIR(pipe);
- for (tries = 0; tries < 5; tries++) {
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_SYMBOL_LOCK) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
- break;
- }
- }
- if (tries == 5)
- drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
-
- drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
-
-}
-
-static const int snb_b_fdi_train_param[] = {
- FDI_LINK_TRAIN_400MV_0DB_SNB_B,
- FDI_LINK_TRAIN_400MV_6DB_SNB_B,
- FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
- FDI_LINK_TRAIN_800MV_0DB_SNB_B,
-};
-
-/* The FDI link training functions for SNB/Cougarpoint. */
-static void gen6_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp, i, retry;
-
- /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
- for the train result */
- reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- /* enable CPU FDI TX and PCH FDI RX */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- /* SNB-B */
- temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
-
- intel_de_write(dev_priv, FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(500);
-
- for (retry = 0; retry < 5; retry++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
- if (temp & FDI_RX_BIT_LOCK) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_BIT_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 1 done.\n");
- break;
- }
- udelay(50);
- }
- if (retry < 5)
- break;
- }
- if (i == 4)
- drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
-
- /* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- if (IS_GEN(dev_priv, 6)) {
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- /* SNB-B */
- temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- }
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- }
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(500);
-
- for (retry = 0; retry < 5; retry++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
- if (temp & FDI_RX_SYMBOL_LOCK) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 2 done.\n");
- break;
- }
- udelay(50);
- }
- if (retry < 5)
- break;
- }
- if (i == 4)
- drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
-
- drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
-}
-
-/* Manual link training for Ivy Bridge A0 parts */
-static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp, i, j;
-
- /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
- for the train result */
- reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
- intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
-
- /* Try each vswing and preemphasis setting twice before moving on */
- for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
- /* disable first in case we need to retry */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
- temp &= ~FDI_TX_ENABLE;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_AUTO;
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp &= ~FDI_RX_ENABLE;
- intel_de_write(dev_priv, reg, temp);
-
- /* enable CPU FDI TX and PCH FDI RX */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[j/2];
- temp |= FDI_COMPOSITE_SYNC;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
-
- intel_de_write(dev_priv, FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- temp |= FDI_COMPOSITE_SYNC;
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(1); /* should be 0.5us */
-
- for (i = 0; i < 4; i++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_BIT_LOCK ||
- (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_BIT_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 1 done, level %i.\n",
- i);
- break;
- }
- udelay(1); /* should be 0.5us */
- }
- if (i == 4) {
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 1 fail on vswing %d\n", j / 2);
- continue;
- }
-
- /* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE_IVB;
- temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(2); /* should be 1.5us */
-
- for (i = 0; i < 4; i++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_SYMBOL_LOCK ||
- (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 2 done, level %i.\n",
- i);
- goto train_done;
- }
- udelay(2); /* should be 1.5us */
- }
- if (i == 4)
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 2 fail on vswing %d\n", j / 2);
- }
-
-train_done:
- drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
-}
-
-static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- enum pipe pipe = intel_crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(200);
-
- /* Switch from Rawclk to PCDclk */
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(200);
-
- /* Enable CPU FDI TX PLL, always on for Ironlake */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
- }
-}
-
-static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = intel_crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* Switch from PCDclk to Rawclk */
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
-
- /* Disable CPU FDI TX PLL */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
-
- /* Wait for the clocks to turn off. */
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-}
-
-static void ilk_fdi_disable(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* disable CPU FDI tx and PCH FDI rx */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(0x7 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-
- /* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev_priv))
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
- FDI_RX_PHASE_SYNC_POINTER_OVR);
-
- /* still set train pattern 1 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- /* BPC in FDI rx is consistent with that in PIPECONF */
- temp &= ~(0x07 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-}
-
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
struct drm_crtc *crtc;
@@ -5752,7 +4007,7 @@ static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_st
* Finds the encoder associated with the given CRTC. This can only be
* used when we know that the CRTC isn't feeding multiple encoders!
*/
-static struct intel_encoder *
+struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
@@ -6271,7 +4526,7 @@ static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
}
-inline u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
+u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
{
if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
return (PS_FILTER_PROGRAMMED |
@@ -6470,7 +4725,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
if (!old_crtc_state->ips_enabled)
return false;
- if (needs_modeset(new_crtc_state))
+ if (intel_crtc_needs_modeset(new_crtc_state))
return true;
/*
@@ -6497,7 +4752,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
if (!new_crtc_state->ips_enabled)
return false;
- if (needs_modeset(new_crtc_state))
+ if (intel_crtc_needs_modeset(new_crtc_state))
return true;
/*
@@ -6550,7 +4805,7 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
+ return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
new_crtc_state->active_planes;
}
@@ -6558,7 +4813,7 @@ static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
return old_crtc_state->active_planes &&
- (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
+ (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
}
static void intel_post_plane_update(struct intel_atomic_state *state,
@@ -6590,41 +4845,72 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
icl_wa_scalerclkgating(dev_priv, pipe, false);
}
-static void skl_disable_async_flip_wa(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- const struct intel_crtc_state *new_crtc_state)
+static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u8 update_planes = crtc_state->update_planes;
+ const struct intel_plane_state *plane_state;
struct intel_plane *plane;
- struct intel_plane_state *new_plane_state;
int i;
- for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
- u32 update_mask = new_crtc_state->update_planes;
- u32 plane_ctl, surf_addr;
- enum plane_id plane_id;
- unsigned long irqflags;
- enum pipe pipe;
-
- if (crtc->pipe != plane->pipe ||
- !(update_mask & BIT(plane->id)))
- continue;
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->enable_flip_done &&
+ plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id))
+ plane->enable_flip_done(plane);
+ }
+}
- plane_id = plane->id;
- pipe = plane->pipe;
+static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u8 update_planes = crtc_state->update_planes;
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- plane_ctl = intel_de_read_fw(dev_priv, PLANE_CTL(pipe, plane_id));
- surf_addr = intel_de_read_fw(dev_priv, PLANE_SURF(pipe, plane_id));
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->disable_flip_done &&
+ plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id))
+ plane->disable_flip_done(plane);
+ }
+}
- plane_ctl &= ~PLANE_CTL_ASYNC_FLIP;
+static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u8 update_planes = new_crtc_state->update_planes;
+ const struct intel_plane_state *old_plane_state;
+ struct intel_plane *plane;
+ bool need_vbl_wait = false;
+ int i;
- intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), surf_addr);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
+ if (plane->need_async_flip_disable_wa &&
+ plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id)) {
+ /*
+ * Apart from the async flip bit we want to
+ * preserve the old state for the plane.
+ */
+ plane->async_flip(plane, old_crtc_state,
+ old_plane_state, false);
+ need_vbl_wait = true;
+ }
}
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ if (need_vbl_wait)
+ intel_wait_for_vblank(i915, crtc->pipe);
}
static void intel_pre_plane_update(struct intel_atomic_state *state,
@@ -6681,7 +4967,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* If we're doing a modeset we don't need to do any
* pre-vblank watermark programming here.
*/
- if (!needs_modeset(new_crtc_state)) {
+ if (!intel_crtc_needs_modeset(new_crtc_state)) {
/*
* For platforms that support atomic watermarks, program the
* 'intermediate' watermarks immediately. On pre-gen9 platforms, these
@@ -6717,10 +5003,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* WA for platforms where async address update enable bit
* is double buffered and only latched at start of vblank.
*/
- if (old_crtc_state->uapi.async_flip &&
- !new_crtc_state->uapi.async_flip &&
- IS_GEN_RANGE(dev_priv, 9, 10))
- skl_disable_async_flip_wa(state, crtc, new_crtc_state);
+ if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
+ intel_crtc_async_flip_disable_wa(state, crtc);
}
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
@@ -7140,7 +5424,7 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
val = intel_de_read(dev_priv, reg);
val &= ~HSW_FRAME_START_DELAY_MASK;
- val |= HSW_FRAME_START_DELAY(0);
+ val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
@@ -7575,25 +5859,25 @@ modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
enum intel_display_power_domain domain;
u64 domains, new_domains, old_domains;
- old_domains = crtc->enabled_power_domains;
- crtc->enabled_power_domains = new_domains =
- get_crtc_power_domains(crtc_state);
+ domains = get_crtc_power_domains(crtc_state);
- domains = new_domains & ~old_domains;
+ new_domains = domains & ~crtc->enabled_power_domains.mask;
+ old_domains = crtc->enabled_power_domains.mask & ~domains;
- for_each_power_domain(domain, domains)
- intel_display_power_get(dev_priv, domain);
+ for_each_power_domain(domain, new_domains)
+ intel_display_power_get_in_set(dev_priv,
+ &crtc->enabled_power_domains,
+ domain);
- return old_domains & ~new_domains;
+ return old_domains;
}
-static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
- u64 domains)
+static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
+ u64 domains)
{
- enum intel_display_power_domain domain;
-
- for_each_power_domain(domain, domains)
- intel_display_power_put_unchecked(dev_priv, domain);
+ intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
+ &crtc->enabled_power_domains,
+ domains);
}
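
The reworked helpers above make the crtc remember which power domains it currently holds in a set, so the put side releases exactly the references the get side took. A generic sketch of that pattern with invented names, not the i915 display power API:

#include <stdint.h>

struct domain_set {
	uint64_t mask;	/* one bit per domain currently held */
};

static void domain_set_get(struct domain_set *set, int domain)
{
	/* take the underlying reference here, then record it */
	set->mask |= 1ull << domain;
}

static void domain_set_put_mask(struct domain_set *set, uint64_t mask)
{
	/* drop the underlying references for each bit in mask */
	set->mask &= ~mask;
}
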
static void valleyview_crtc_enable(struct intel_atomic_state *state,
@@ -7789,12 +6073,10 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
to_intel_dbuf_state(dev_priv->dbuf.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- enum intel_display_power_domain domain;
struct intel_plane *plane;
struct drm_atomic_state *state;
struct intel_crtc_state *temp_crtc_state;
enum pipe pipe = crtc->pipe;
- u64 domains;
int ret;
if (!crtc_state->hw.active)
@@ -7850,10 +6132,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
intel_update_watermarks(crtc);
intel_disable_shared_dpll(crtc_state);
- domains = crtc->enabled_power_domains;
- for_each_power_domain(domain, domains)
- intel_display_power_put_unchecked(dev_priv, domain);
- crtc->enabled_power_domains = 0;
+ intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
dev_priv->active_pipes &= ~BIT(pipe);
cdclk_state->min_cdclk[pipe] = 0;
@@ -7933,143 +6212,6 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
}
}
-static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
-{
- if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
- return crtc_state->fdi_lanes;
-
- return 0;
-}
-
-static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *state = pipe_config->uapi.state;
- struct intel_crtc *other_crtc;
- struct intel_crtc_state *other_crtc_state;
-
- drm_dbg_kms(&dev_priv->drm,
- "checking fdi config on pipe %c, lanes %i\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- if (pipe_config->fdi_lanes > 4) {
- drm_dbg_kms(&dev_priv->drm,
- "invalid fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return -EINVAL;
- }
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- if (pipe_config->fdi_lanes > 2) {
- drm_dbg_kms(&dev_priv->drm,
- "only 2 lanes on haswell, required: %i lanes\n",
- pipe_config->fdi_lanes);
- return -EINVAL;
- } else {
- return 0;
- }
- }
-
- if (INTEL_NUM_PIPES(dev_priv) == 2)
- return 0;
-
- /* Ivybridge 3 pipe is really complicated */
- switch (pipe) {
- case PIPE_A:
- return 0;
- case PIPE_B:
- if (pipe_config->fdi_lanes <= 2)
- return 0;
-
- other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
- other_crtc_state =
- intel_atomic_get_crtc_state(state, other_crtc);
- if (IS_ERR(other_crtc_state))
- return PTR_ERR(other_crtc_state);
-
- if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
- drm_dbg_kms(&dev_priv->drm,
- "invalid shared fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return -EINVAL;
- }
- return 0;
- case PIPE_C:
- if (pipe_config->fdi_lanes > 2) {
- drm_dbg_kms(&dev_priv->drm,
- "only 2 lanes on pipe %c: required %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return -EINVAL;
- }
-
- other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
- other_crtc_state =
- intel_atomic_get_crtc_state(state, other_crtc);
- if (IS_ERR(other_crtc_state))
- return PTR_ERR(other_crtc_state);
-
- if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
- drm_dbg_kms(&dev_priv->drm,
- "fdi link B uses too many lanes to enable link C\n");
- return -EINVAL;
- }
- return 0;
- default:
- BUG();
- }
-}
-
-#define RETRY 1
-static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- int lane, link_bw, fdi_dotclock, ret;
- bool needs_recompute = false;
-
-retry:
- /* FDI is a binary signal running at ~2.7GHz, encoding
- * each output octet as 10 bits. The actual frequency
- * is stored as a divider into a 100MHz clock, and the
- * mode pixel clock is stored in units of 1KHz.
- * Hence the bw of each lane in terms of the mode signal
- * is:
- */
- link_bw = intel_fdi_link_freq(i915, pipe_config);
-
- fdi_dotclock = adjusted_mode->crtc_clock;
-
- lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
- pipe_config->pipe_bpp);
-
- pipe_config->fdi_lanes = lane;
-
- intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
- link_bw, &pipe_config->fdi_m_n, false, false);
-
- ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
- if (ret == -EDEADLK)
- return ret;
-
- if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
- pipe_config->pipe_bpp -= 2*3;
- drm_dbg_kms(&i915->drm,
- "fdi link bw constraint, reducing pipe bpp to %i\n",
- pipe_config->pipe_bpp);
- needs_recompute = true;
- pipe_config->bw_constrained = true;
-
- goto retry;
- }
-
- if (needs_recompute)
- return RETRY;
-
- return ret;
-}
-
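The lane computation that ilk_fdi_compute_config() feeds into ilk_get_lanes_required() follows directly from the comment above: the mode pixel clock (in kHz) times bits per pixel gives the payload bandwidth, and each FDI lane carries link_bw * 8 of that in the same units (the 8b/10b encoding is already folded into link_bw, per the "10 bits per octet" note). A minimal standalone sketch of that arithmetic, assuming bps is simply clock * bpp — the in-tree helper may additionally pad the bandwidth for spread spectrum:

/* Standalone sketch of the FDI lane math; not the in-tree helper. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int fdi_lanes_required(int pixel_clock_khz, int bpp, int link_bw_khz)
{
	/* Payload bandwidth in kbit/s; no spread-spectrum margin applied here. */
	long long bps = (long long)pixel_clock_khz * bpp;

	/* Each lane carries link_bw * 8 kbit/s of payload. */
	return DIV_ROUND_UP(bps, (long long)link_bw_khz * 8);
}

int main(void)
{
	/* 1920x1080@60 (148500 kHz), 24 bpp, 270000 kHz FDI link clock -> 2 lanes. */
	printf("%d\n", fdi_lanes_required(148500, 24, 270000));
	return 0;
}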
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -8301,19 +6443,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return -EINVAL;
}
- if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
- pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
- pipe_config->hw.ctm) {
- /*
- * There is only one pipe CSC unit per pipe, and we need that
- * for output conversion from RGB->YCBCR. So if CTM is already
- * applied we can't support YCBCR420 output.
- */
- drm_dbg_kms(&dev_priv->drm,
- "YCBCR420 and CTM together are not possible\n");
- return -EINVAL;
- }
-
/*
* Pipe horizontal size must be even in:
* - DVO ganged mode
@@ -8425,51 +6554,6 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
}
}
-static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->params.panel_use_ssc >= 0)
- return dev_priv->params.panel_use_ssc != 0;
- return dev_priv->vbt.lvds_use_ssc
- && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
-}
-
-static u32 pnv_dpll_compute_fp(struct dpll *dpll)
-{
- return (1 << dpll->n) << 16 | dpll->m2;
-}
-
-static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
-{
- return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
-}
-
-static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 fp, fp2 = 0;
-
- if (IS_PINEVIEW(dev_priv)) {
- fp = pnv_dpll_compute_fp(&crtc_state->dpll);
- if (reduced_clock)
- fp2 = pnv_dpll_compute_fp(reduced_clock);
- } else {
- fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
- if (reduced_clock)
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
- }
-
- crtc_state->dpll_hw_state.fp0 = fp;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- reduced_clock) {
- crtc_state->dpll_hw_state.fp1 = fp2;
- } else {
- crtc_state->dpll_hw_state.fp1 = fp;
- }
-}
-
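For reference, the two compute_fp helpers above just pack the divider struct into the FP register layout: the Pineview variant stores (1 << n) in the high word with m2 in the low bits, while the i9xx variant packs n, m1 and m2 into successive byte fields. A small sketch of that packing, using illustrative divider values rather than anything a real mode computation would produce:

#include <stdint.h>
#include <stdio.h>

struct dpll_sketch { int n, m1, m2; };

/* Mirrors pnv_dpll_compute_fp() above: (1 << n) << 16 | m2 */
static uint32_t pnv_fp(const struct dpll_sketch *d)
{
	return ((uint32_t)1 << d->n) << 16 | d->m2;
}

/* Mirrors i9xx_dpll_compute_fp() above: n << 16 | m1 << 8 | m2 */
static uint32_t i9xx_fp(const struct dpll_sketch *d)
{
	return (uint32_t)d->n << 16 | d->m1 << 8 | d->m2;
}

int main(void)
{
	struct dpll_sketch d = { .n = 3, .m1 = 10, .m2 = 8 }; /* illustrative only */

	printf("pnv  FP0 = 0x%08x\n", pnv_fp(&d));  /* 0x00080008 */
	printf("i9xx FP0 = 0x%08x\n", i9xx_fp(&d)); /* 0x00030a08 */
	return 0;
}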
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
pipe)
{
@@ -8594,39 +6678,6 @@ void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_s
intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
-static void vlv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (crtc->pipe != PIPE_A)
- pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- /* DPLL not used with DSI, but still need the rest set up */
- if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
- pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
- DPLL_EXT_BUFFER_ENABLE_VLV;
-
- pipe_config->dpll_hw_state.dpll_md =
- (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
-static void chv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (crtc->pipe != PIPE_A)
- pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- /* DPLL not used with DSI, but still need the rest set up */
- if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
- pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
-
- pipe_config->dpll_hw_state.dpll_md =
- (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
@@ -8886,128 +6937,7 @@ void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
vlv_disable_pll(dev_priv, pipe);
}
-static void i9xx_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll;
- struct dpll *clock = &crtc_state->dpll;
-
- i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
-
- dpll = DPLL_VGA_MODE_DIS;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
- dpll |= DPLLB_MODE_LVDS;
- else
- dpll |= DPLLB_MODE_DAC_SERIAL;
-
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
- dpll |= (crtc_state->pixel_multiplier - 1)
- << SDVO_MULTIPLIER_SHIFT_HIRES;
- }
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- if (intel_crtc_has_dp_encoder(crtc_state))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- /* compute bitmask from p1 value */
- if (IS_PINEVIEW(dev_priv))
- dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
- else {
- dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (IS_G4X(dev_priv) && reduced_clock)
- dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- }
- switch (clock->p2) {
- case 5:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
- break;
- case 7:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
- break;
- case 10:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
- break;
- case 14:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
- break;
- }
- if (INTEL_GEN(dev_priv) >= 4)
- dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
-
- if (crtc_state->sdvo_tv_clock)
- dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv))
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
- else
- dpll |= PLL_REF_INPUT_DREFCLK;
-
- dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll = dpll;
-
- if (INTEL_GEN(dev_priv) >= 4) {
- u32 dpll_md = (crtc_state->pixel_multiplier - 1)
- << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc_state->dpll_hw_state.dpll_md = dpll_md;
- }
-}
-
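The "compute bitmask from p1 value" step in i9xx_compute_dpll() above is a one-hot encoding: post divider p1 selects bit (p1 - 1) of the P1 field, shifted to the field's position in the DPLL control register. A tiny sketch with an assumed shift value (the real DPLL_FPA01_P1_POST_DIV_SHIFT comes from i915_reg.h and is not shown in this diff):

#include <stdio.h>
#include <stdint.h>

/* Assumed field position for illustration only; the driver uses
 * DPLL_FPA01_P1_POST_DIV_SHIFT from i915_reg.h. */
#define P1_POST_DIV_SHIFT 16

static uint32_t p1_field(int p1)
{
	/* One-hot: p1 == 1 -> bit 0, p1 == 2 -> bit 1, ... */
	return ((uint32_t)1 << (p1 - 1)) << P1_POST_DIV_SHIFT;
}

int main(void)
{
	for (int p1 = 1; p1 <= 4; p1++)
		printf("p1=%d -> 0x%08x\n", p1, p1_field(p1));
	return 0;
}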
-static void i8xx_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll;
- struct dpll *clock = &crtc_state->dpll;
- i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
-
- dpll = DPLL_VGA_MODE_DIS;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- } else {
- if (clock->p1 == 2)
- dpll |= PLL_P1_DIVIDE_BY_TWO;
- else
- dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (clock->p2 == 4)
- dpll |= PLL_P2_DIVIDE_BY_4;
- }
-
- /*
- * Bspec:
- * "[Almador Errata}: For the correct operation of the muxed DVO pins
- * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
- * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
- * Enable) must be set to “1” in both the DPLL A Control Register
- * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
- *
- * For simplicity we simply keep both bits always enabled in
- * both DPLLs. The spec says we should disable the DVO 2X clock
- * when not needed, but this seems to work fine in practice.
- */
- if (IS_I830(dev_priv) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
- dpll |= DPLL_DVO_2X_MODE;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv))
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
- else
- dpll |= PLL_REF_INPUT_DREFCLK;
-
- dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll = dpll;
-}
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
@@ -9207,214 +7137,12 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- pipeconf |= PIPECONF_FRAME_START_DELAY(0);
+ pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
-static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_limit *limit;
- int refclk = 48000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- limit = &intel_limits_i8xx_lvds;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
- limit = &intel_limits_i8xx_dvo;
- } else {
- limit = &intel_limits_i8xx_dac;
- }
-
- if (!crtc_state->clock_set &&
- !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i8xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_limit *limit;
- int refclk = 96000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- if (intel_is_dual_link_lvds(dev_priv))
- limit = &intel_limits_g4x_dual_channel_lvds;
- else
- limit = &intel_limits_g4x_single_channel_lvds;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
- limit = &intel_limits_g4x_hdmi;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
- limit = &intel_limits_g4x_sdvo;
- } else {
- /* The option is for other outputs */
- limit = &intel_limits_i9xx_sdvo;
- }
-
- if (!crtc_state->clock_set &&
- !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i9xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_limit *limit;
- int refclk = 96000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- limit = &pnv_limits_lvds;
- } else {
- limit = &pnv_limits_sdvo;
- }
-
- if (!crtc_state->clock_set &&
- !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i9xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_limit *limit;
- int refclk = 96000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- limit = &intel_limits_i9xx_lvds;
- } else {
- limit = &intel_limits_i9xx_sdvo;
- }
-
- if (!crtc_state->clock_set &&
- !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i9xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int chv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- int refclk = 100000;
- const struct intel_limit *limit = &intel_limits_chv;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (!crtc_state->clock_set &&
- !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- chv_compute_dpll(crtc, crtc_state);
-
- return 0;
-}
-
-static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- int refclk = 100000;
- const struct intel_limit *limit = &intel_limits_vlv;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (!crtc_state->clock_set &&
- !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- vlv_compute_dpll(crtc, crtc_state);
-
- return 0;
-}
-
static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
{
if (IS_I830(dev_priv))
@@ -10316,7 +8044,7 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- val |= PIPECONF_FRAME_START_DELAY(0);
+ val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, PIPECONF(pipe), val);
intel_de_posting_read(dev_priv, PIPECONF(pipe));
@@ -10424,172 +8152,6 @@ int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
return DIV_ROUND_UP(bps, link_bw * 8);
}
-static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
-{
- return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
-}
-
-static void ilk_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll, fp, fp2;
- int factor;
-
- /* Enable autotuning of the PLL clock (if permissible) */
- factor = 21;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if ((intel_panel_use_ssc(dev_priv) &&
- dev_priv->vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(dev_priv) &&
- intel_is_dual_link_lvds(dev_priv)))
- factor = 25;
- } else if (crtc_state->sdvo_tv_clock) {
- factor = 20;
- }
-
- fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
-
- if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
- fp |= FP_CB_TUNE;
-
- if (reduced_clock) {
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
-
- if (reduced_clock->m < factor * reduced_clock->n)
- fp2 |= FP_CB_TUNE;
- } else {
- fp2 = fp;
- }
-
- dpll = 0;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
- dpll |= DPLLB_MODE_LVDS;
- else
- dpll |= DPLLB_MODE_DAC_SERIAL;
-
- dpll |= (crtc_state->pixel_multiplier - 1)
- << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- if (intel_crtc_has_dp_encoder(crtc_state))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- /*
- * The high speed IO clock is only really required for
- * SDVO/HDMI/DP, but we also enable it for CRT to make it
- * possible to share the DPLL between CRT and HDMI. Enabling
- * the clock needlessly does no real harm, except potentially
- * using up a bit of power.
- *
- * We'll limit this to IVB with 3 pipes, since it has only two
- * DPLLs and so DPLL sharing is the only way to get three pipes
- * driving PCH ports at the same time. On SNB we could do this,
- * and potentially avoid enabling the second DPLL, but it's not
- * clear if it's a win or loss power-wise. No point in doing
- * this on ILK at all since it has a fixed DPLL<->pipe mapping.
- */
- if (INTEL_NUM_PIPES(dev_priv) == 3 &&
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- /* compute bitmask from p1 value */
- dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- /* also FPA1 */
- dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
-
- switch (crtc_state->dpll.p2) {
- case 5:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
- break;
- case 7:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
- break;
- case 10:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
- break;
- case 14:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
- break;
- }
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv))
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
- else
- dpll |= PLL_REF_INPUT_DREFCLK;
-
- dpll |= DPLL_VCO_ENABLE;
-
- crtc_state->dpll_hw_state.dpll = dpll;
- crtc_state->dpll_hw_state.fp0 = fp;
- crtc_state->dpll_hw_state.fp1 = fp2;
-}
-
-static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
- const struct intel_limit *limit;
- int refclk = 120000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
- if (!crtc_state->has_pch_encoder)
- return 0;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- dev_priv->vbt.lvds_ssc_freq);
- refclk = dev_priv->vbt.lvds_ssc_freq;
- }
-
- if (intel_is_dual_link_lvds(dev_priv)) {
- if (refclk == 100000)
- limit = &ilk_limits_dual_lvds_100m;
- else
- limit = &ilk_limits_dual_lvds;
- } else {
- if (refclk == 100000)
- limit = &ilk_limits_single_lvds_100m;
- else
- limit = &ilk_limits_single_lvds;
- }
- } else {
- limit = &ilk_limits_dac;
- }
-
- if (!crtc_state->clock_set &&
- !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- ilk_compute_dpll(crtc, crtc_state, NULL);
-
- if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return -EINVAL;
- }
-
- return 0;
-}
-
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
@@ -11002,29 +8564,6 @@ out:
return ret;
}
-static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
-
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
- INTEL_GEN(dev_priv) >= 11) {
- struct intel_encoder *encoder =
- intel_get_crtc_new_encoder(state, crtc_state);
-
- if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
struct intel_crtc_state *pipe_config)
{
@@ -11226,16 +8765,13 @@ static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- u64 *power_domain_mask,
- intel_wakeref_t *wakerefs)
+ struct intel_display_power_domain_set *power_domain_set)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum intel_display_power_domain power_domain;
unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
unsigned long enabled_panel_transcoders = 0;
enum transcoder panel_transcoder;
- intel_wakeref_t wf;
u32 tmp;
if (INTEL_GEN(dev_priv) >= 11)
@@ -11306,16 +8842,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
enabled_panel_transcoders != BIT(TRANSCODER_EDP));
- power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
- drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
-
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wf)
+ if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+ POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
- wakerefs[power_domain] = wf;
- *power_domain_mask |= BIT_ULL(power_domain);
-
tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
return tmp & PIPECONF_ENABLE;
@@ -11323,14 +8853,11 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- u64 *power_domain_mask,
- intel_wakeref_t *wakerefs)
+ struct intel_display_power_domain_set *power_domain_set)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum intel_display_power_domain power_domain;
enum transcoder cpu_transcoder;
- intel_wakeref_t wf;
enum port port;
u32 tmp;
@@ -11340,16 +8867,10 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
else
cpu_transcoder = TRANSCODER_DSI_C;
- power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
-
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wf)
+ if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
continue;
- wakerefs[power_domain] = wf;
- *power_domain_mask |= BIT_ULL(power_domain);
-
/*
* The PLL needs to be enabled with a valid divider
* configuration, otherwise accessing DSI registers will hang
@@ -11432,30 +8953,20 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
- enum intel_display_power_domain power_domain;
- u64 power_domain_mask;
+ struct intel_display_power_domain_set power_domain_set = { };
bool active;
u32 tmp;
- pipe_config->master_transcoder = INVALID_TRANSCODER;
-
- power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wf)
+ if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
- wakerefs[power_domain] = wf;
- power_domain_mask = BIT_ULL(power_domain);
-
pipe_config->shared_dpll = NULL;
- active = hsw_get_transcoder_state(crtc, pipe_config,
- &power_domain_mask, wakerefs);
+ active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
if (IS_GEN9_LP(dev_priv) &&
- bxt_get_dsi_transcoder_state(crtc, pipe_config,
- &power_domain_mask, wakerefs)) {
+ bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
drm_WARN_ON(&dev_priv->drm, active);
active = true;
}
@@ -11478,6 +8989,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_get_transcoder_timings(crtc, pipe_config);
}
+ if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ intel_vrr_get_config(crtc, pipe_config);
+
intel_get_pipe_src_size(crtc, pipe_config);
if (IS_HASWELL(dev_priv)) {
@@ -11519,14 +9033,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->ips_linetime =
REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
- power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));
-
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (wf) {
- wakerefs[power_domain] = wf;
- power_domain_mask |= BIT_ULL(power_domain);
-
+ if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
+ POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
if (INTEL_GEN(dev_priv) >= 9)
skl_get_pfit_config(pipe_config);
else
@@ -11560,9 +9068,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
}
out:
- for_each_power_domain(power_domain, power_domain_mask)
- intel_display_power_put(dev_priv,
- power_domain, wakerefs[power_domain]);
+ intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
return active;
}
@@ -11582,569 +9088,6 @@ static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
return true;
}
-static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- u32 base;
-
- if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
- base = sg_dma_address(obj->mm.pages->sgl);
- else
- base = intel_plane_ggtt_offset(plane_state);
-
- return base + plane_state->color_plane[0].offset;
-}
-
-static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
-{
- int x = plane_state->uapi.dst.x1;
- int y = plane_state->uapi.dst.y1;
- u32 pos = 0;
-
- if (x < 0) {
- pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
- x = -x;
- }
- pos |= x << CURSOR_X_SHIFT;
-
- if (y < 0) {
- pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
- y = -y;
- }
- pos |= y << CURSOR_Y_SHIFT;
-
- return pos;
-}
-
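intel_cursor_position() above encodes each coordinate as sign-plus-magnitude: a negative value sets the per-axis sign flag and stores the absolute value in the position field. A standalone sketch with assumed bit positions (CURSOR_X_SHIFT, CURSOR_Y_SHIFT and CURSOR_POS_SIGN live in i915_reg.h; the values below are purely illustrative):

#include <stdio.h>
#include <stdint.h>

/* Illustrative layout: X in the low half-word, Y in the high half-word,
 * with bit 15 of each half acting as the sign flag. */
#define CUR_X_SHIFT   0
#define CUR_Y_SHIFT   16
#define CUR_POS_SIGN  (1u << 15)

static uint32_t cursor_pos(int x, int y)
{
	uint32_t pos = 0;

	if (x < 0) {
		pos |= CUR_POS_SIGN << CUR_X_SHIFT;
		x = -x;
	}
	pos |= (uint32_t)x << CUR_X_SHIFT;

	if (y < 0) {
		pos |= CUR_POS_SIGN << CUR_Y_SHIFT;
		y = -y;
	}
	pos |= (uint32_t)y << CUR_Y_SHIFT;

	return pos;
}

int main(void)
{
	/* Cursor partially off the left edge: x = -10, y = 20. */
	printf("CURPOS = 0x%08x\n", cursor_pos(-10, 20)); /* 0x0014800a */
	return 0;
}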
-static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
- const struct drm_mode_config *config =
- &plane_state->uapi.plane->dev->mode_config;
- int width = drm_rect_width(&plane_state->uapi.dst);
- int height = drm_rect_height(&plane_state->uapi.dst);
-
- return width > 0 && width <= config->cursor_width &&
- height > 0 && height <= config->cursor_height;
-}
-
-static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- unsigned int rotation = plane_state->hw.rotation;
- int src_x, src_y;
- u32 offset;
- int ret;
-
- ret = intel_plane_compute_gtt(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- src_x = plane_state->uapi.src.x1 >> 16;
- src_y = plane_state->uapi.src.y1 >> 16;
-
- intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
- offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
- plane_state, 0);
-
- if (src_x != 0 || src_y != 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Arbitrary cursor panning not supported\n");
- return -EINVAL;
- }
-
- /*
- * Put the final coordinates back so that the src
- * coordinate checks will see the right values.
- */
- drm_rect_translate_to(&plane_state->uapi.src,
- src_x << 16, src_y << 16);
-
- /* ILK+ do this automagically in hardware */
- if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
-
- offset += (src_h * src_w - 1) * fb->format->cpp[0];
- }
-
- plane_state->color_plane[0].offset = offset;
- plane_state->color_plane[0].x = src_x;
- plane_state->color_plane[0].y = src_y;
-
- return 0;
-}
-
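The HAS_GMCH() rotation fix-up at the end of intel_cursor_check_surface() moves the surface offset to the last pixel of the cursor, since on those platforms a 180° rotation is realised by scanning the buffer backwards. A quick worked example of that adjustment for a square cursor, assuming 4 bytes per pixel as fb->format->cpp[0] would report for ARGB8888:

#include <stdio.h>

int main(void)
{
	int src_w = 64, src_h = 64;  /* 64x64 cursor */
	int cpp = 4;                 /* ARGB8888: 4 bytes per pixel */
	unsigned int offset = 0;     /* offset of the first pixel */

	/* Same adjustment as the HAS_GMCH() + ROTATE_180 branch above:
	 * point at the last pixel so reversed scanout shows the image upright. */
	offset += (src_h * src_w - 1) * cpp;

	printf("rotated offset = %u bytes\n", offset); /* 16380 */
	return 0;
}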
-static int intel_check_cursor(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- const struct drm_rect src = plane_state->uapi.src;
- const struct drm_rect dst = plane_state->uapi.dst;
- int ret;
-
- if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
- drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
- return -EINVAL;
- }
-
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true);
- if (ret)
- return ret;
-
- /* Use the unclipped src/dst rectangles, which we program to hw */
- plane_state->uapi.src = src;
- plane_state->uapi.dst = dst;
-
- ret = intel_cursor_check_surface(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- ret = intel_plane_check_src_coordinates(plane_state);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static unsigned int
-i845_cursor_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
-{
- return 2048;
-}
-
-static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- u32 cntl = 0;
-
- if (crtc_state->gamma_enable)
- cntl |= CURSOR_GAMMA_ENABLE;
-
- return cntl;
-}
-
-static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- return CURSOR_ENABLE |
- CURSOR_FORMAT_ARGB |
- CURSOR_STRIDE(plane_state->color_plane[0].stride);
-}
-
-static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
- int width = drm_rect_width(&plane_state->uapi.dst);
-
- /*
- * 845g/865g are only limited by the width of their cursors;
- * the height is arbitrary up to the precision of the register.
- */
- return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
-}
-
-static int i845_check_cursor(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- int ret;
-
- ret = intel_check_cursor(crtc_state, plane_state);
- if (ret)
- return ret;
-
- /* if we want to turn off the cursor ignore width and height */
- if (!fb)
- return 0;
-
- /* Check for which cursor types we support */
- if (!i845_cursor_size_ok(plane_state)) {
- drm_dbg_kms(&i915->drm,
- "Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
- return -EINVAL;
- }
-
- drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
-
- switch (fb->pitches[0]) {
- case 256:
- case 512:
- case 1024:
- case 2048:
- break;
- default:
- drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
- fb->pitches[0]);
- return -EINVAL;
- }
-
- plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
-
- return 0;
-}
-
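In the i845 path above, the stride programmed via CURSOR_STRIDE() must match the framebuffer pitch, and only the four pitches in the switch are accepted. Assuming 4 bytes per pixel (suggested by CURSOR_FORMAT_ARGB, not stated explicitly here), that would make the usable 64-aligned widths effectively 64, 128, 256 and 512. A quick sanity-check sketch of that relationship:

#include <stdbool.h>
#include <stdio.h>

static bool i845_pitch_ok(unsigned int pitch)
{
	/* Same accepted values as the switch in i845_check_cursor() above. */
	switch (pitch) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	/* Assume 4 bytes per pixel (ARGB). */
	for (int width = 64; width <= 512; width += 64) {
		unsigned int pitch = width * 4;
		printf("width %3d -> pitch %4u: %s\n", width, pitch,
		       i845_pitch_ok(pitch) ? "ok" : "rejected");
	}
	return 0;
}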
-static void i845_update_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- u32 cntl = 0, base = 0, pos = 0, size = 0;
- unsigned long irqflags;
-
- if (plane_state && plane_state->uapi.visible) {
- unsigned int width = drm_rect_width(&plane_state->uapi.dst);
- unsigned int height = drm_rect_height(&plane_state->uapi.dst);
-
- cntl = plane_state->ctl |
- i845_cursor_ctl_crtc(crtc_state);
-
- size = (height << 12) | width;
-
- base = intel_cursor_base(plane_state);
- pos = intel_cursor_position(plane_state);
- }
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- /* On these chipsets we can only modify the base/size/stride
- * whilst the cursor is disabled.
- */
- if (plane->cursor.base != base ||
- plane->cursor.size != size ||
- plane->cursor.cntl != cntl) {
- intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
- intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
- intel_de_write_fw(dev_priv, CURSIZE, size);
- intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
- intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
-
- plane->cursor.base = base;
- plane->cursor.size = size;
- plane->cursor.cntl = cntl;
- } else {
- intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
- }
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void i845_disable_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- i845_update_cursor(plane, crtc_state, NULL);
-}
-
-static bool i845_cursor_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum intel_display_power_domain power_domain;
- intel_wakeref_t wakeref;
- bool ret;
-
- power_domain = POWER_DOMAIN_PIPE(PIPE_A);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return false;
-
- ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
-
- *pipe = PIPE_A;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return ret;
-}
-
-static unsigned int
-i9xx_cursor_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
-{
- return plane->base.dev->mode_config.cursor_width * 4;
-}
-
-static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 cntl = 0;
-
- if (INTEL_GEN(dev_priv) >= 11)
- return cntl;
-
- if (crtc_state->gamma_enable)
- cntl = MCURSOR_GAMMA_ENABLE;
-
- if (crtc_state->csc_enable)
- cntl |= MCURSOR_PIPE_CSC_ENABLE;
-
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
- cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
-
- return cntl;
-}
-
-static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- u32 cntl = 0;
-
- if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
- cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
-
- switch (drm_rect_width(&plane_state->uapi.dst)) {
- case 64:
- cntl |= MCURSOR_MODE_64_ARGB_AX;
- break;
- case 128:
- cntl |= MCURSOR_MODE_128_ARGB_AX;
- break;
- case 256:
- cntl |= MCURSOR_MODE_256_ARGB_AX;
- break;
- default:
- MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
- return 0;
- }
-
- if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
- cntl |= MCURSOR_ROTATE_180;
-
- return cntl;
-}
-
-static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- int width = drm_rect_width(&plane_state->uapi.dst);
- int height = drm_rect_height(&plane_state->uapi.dst);
-
- if (!intel_cursor_size_ok(plane_state))
- return false;
-
- /* Cursor width is limited to a few power-of-two sizes */
- switch (width) {
- case 256:
- case 128:
- case 64:
- break;
- default:
- return false;
- }
-
- /*
- * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
- * height from 8 lines up to the cursor width, when the
- * cursor is not rotated. Everything else requires square
- * cursors.
- */
- if (HAS_CUR_FBC(dev_priv) &&
- plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
- if (height < 8 || height > width)
- return false;
- } else {
- if (height != width)
- return false;
- }
-
- return true;
-}
-
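Summarising i9xx_cursor_size_ok() above: the width must be one of three power-of-two sizes, and the height is either locked to the width (square cursor) or, on parts with CUR_FBC_CTL and an unrotated plane, anything from 8 lines up to the width. A small standalone predicate capturing those rules; the has_cur_fbc flag stands in for HAS_CUR_FBC(dev_priv), and the mode_config cursor_width/height limits checked by intel_cursor_size_ok() are assumed to already hold:

#include <stdbool.h>
#include <stdio.h>

static bool cursor_size_ok(int width, int height, bool has_cur_fbc, bool rotated)
{
	/* Width limited to a few power-of-two sizes. */
	if (width != 64 && width != 128 && width != 256)
		return false;

	/* CUR_FBC_CTL allows 8..width lines when not rotated; otherwise square. */
	if (has_cur_fbc && !rotated)
		return height >= 8 && height <= width;

	return height == width;
}

int main(void)
{
	printf("%d\n", cursor_size_ok(256, 64, true, false));  /* 1: FBC, short cursor */
	printf("%d\n", cursor_size_ok(256, 64, false, false)); /* 0: must be square */
	printf("%d\n", cursor_size_ok(100, 100, true, false)); /* 0: width not 64/128/256 */
	return 0;
}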
-static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- enum pipe pipe = plane->pipe;
- int ret;
-
- ret = intel_check_cursor(crtc_state, plane_state);
- if (ret)
- return ret;
-
- /* if we want to turn off the cursor ignore width and height */
- if (!fb)
- return 0;
-
- /* Check for which cursor types we support */
- if (!i9xx_cursor_size_ok(plane_state)) {
- drm_dbg(&dev_priv->drm,
- "Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
- return -EINVAL;
- }
-
- drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
-
- if (fb->pitches[0] !=
- drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
- drm_dbg_kms(&dev_priv->drm,
- "Invalid cursor stride (%u) (cursor width %d)\n",
- fb->pitches[0],
- drm_rect_width(&plane_state->uapi.dst));
- return -EINVAL;
- }
-
- /*
- * There's something wrong with the cursor on CHV pipe C.
- * If it straddles the left edge of the screen then
- * moving it away from the edge or disabling it often
- * results in a pipe underrun, and often that can lead to
- * dead pipe (constant underrun reported, and it scans
- * out just a solid color). To recover from that, the
- * display power well must be turned off and on again.
- * Refuse to put the cursor into that compromised position.
- */
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
- plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
- drm_dbg_kms(&dev_priv->drm,
- "CHV cursor C not allowed to straddle the left screen edge\n");
- return -EINVAL;
- }
-
- plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
-
- return 0;
-}
-
-static void i9xx_update_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
- u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
- unsigned long irqflags;
-
- if (plane_state && plane_state->uapi.visible) {
- unsigned width = drm_rect_width(&plane_state->uapi.dst);
- unsigned height = drm_rect_height(&plane_state->uapi.dst);
-
- cntl = plane_state->ctl |
- i9xx_cursor_ctl_crtc(crtc_state);
-
- if (width != height)
- fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
-
- base = intel_cursor_base(plane_state);
- pos = intel_cursor_position(plane_state);
- }
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- /*
- * On some platforms writing CURCNTR first will also
- * cause CURPOS to be armed by the CURBASE write.
- * Without the CURCNTR write the CURPOS write would
- * arm itself. Thus we always update CURCNTR before
- * CURPOS.
- *
- * On other platforms CURPOS always requires the
- * CURBASE write to arm the update. Additionally,
- * a write to any of the cursor registers will cancel
- * an already armed cursor update. Thus leaving out
- * the CURBASE write after CURPOS could lead to a
- * cursor that doesn't appear to move, or even change
- * shape. Thus we always write CURBASE.
- *
- * The other registers are armed by the CURBASE write
- * except when the plane is getting enabled at which time
- * the CURCNTR write arms the update.
- */
-
- if (INTEL_GEN(dev_priv) >= 9)
- skl_write_cursor_wm(plane, crtc_state);
-
- if (!needs_modeset(crtc_state))
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
-
- if (plane->cursor.base != base ||
- plane->cursor.size != fbc_ctl ||
- plane->cursor.cntl != cntl) {
- if (HAS_CUR_FBC(dev_priv))
- intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
- fbc_ctl);
- intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
- intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
- intel_de_write_fw(dev_priv, CURBASE(pipe), base);
-
- plane->cursor.base = base;
- plane->cursor.size = fbc_ctl;
- plane->cursor.cntl = cntl;
- } else {
- intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
- intel_de_write_fw(dev_priv, CURBASE(pipe), base);
- }
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void i9xx_disable_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- i9xx_update_cursor(plane, crtc_state, NULL);
-}
-
-static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum intel_display_power_domain power_domain;
- intel_wakeref_t wakeref;
- bool ret;
- u32 val;
-
- /*
- * Not 100% correct for planes that can move between pipes,
- * but that's only the case for gen2-3 which don't have any
- * display power wells.
- */
- power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return false;
-
- val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
-
- ret = val & MCURSOR_MODE;
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- *pipe = plane->pipe;
- else
- *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
- MCURSOR_PIPE_SELECT_SHIFT;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return ret;
-}
-
/* VESA 640x480x72Hz mode to set on the pipe */
static const struct drm_display_mode load_detect_mode = {
DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
@@ -12527,33 +9470,6 @@ static void ilk_pch_clock_get(struct intel_crtc *crtc,
&pipe_config->fdi_m_n);
}
-static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
- struct intel_crtc *crtc)
-{
- memset(crtc_state, 0, sizeof(*crtc_state));
-
- __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
-
- crtc_state->cpu_transcoder = INVALID_TRANSCODER;
- crtc_state->master_transcoder = INVALID_TRANSCODER;
- crtc_state->hsw_workaround_pipe = INVALID_PIPE;
- crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
- crtc_state->scaler_state.scaler_id = -1;
- crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
-}
-
-static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state;
-
- crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
-
- if (crtc_state)
- intel_crtc_state_reset(crtc_state, crtc);
-
- return crtc_state;
-}
-
/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
@@ -12594,14 +9510,6 @@ intel_encoder_current_mode(struct intel_encoder *encoder)
return mode;
}
-static void intel_crtc_destroy(struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- kfree(intel_crtc);
-}
-
/**
* intel_wm_need_update - Check whether watermarks need updating
* @cur: current plane state
@@ -12651,7 +9559,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- bool mode_changed = needs_modeset(crtc_state);
+ bool mode_changed = intel_crtc_needs_modeset(crtc_state);
bool was_crtc_enabled = old_crtc_state->hw.active;
bool is_crtc_enabled = crtc_state->hw.active;
bool turn_off, turn_on, visible, was_visible;
@@ -12842,6 +9750,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
plane_state->planar_linked_plane = NULL;
if (plane_state->planar_slave && !plane_state->uapi.visible) {
+ crtc_state->enabled_planes &= ~BIT(plane->id);
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->update_planes |= BIT(plane->id);
}
@@ -12885,6 +9794,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
linked_state->planar_slave = true;
linked_state->planar_linked_plane = plane;
+ crtc_state->enabled_planes |= BIT(linked->id);
crtc_state->active_planes |= BIT(linked->id);
crtc_state->update_planes |= BIT(linked->id);
drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
@@ -13013,7 +9923,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- bool mode_changed = needs_modeset(crtc_state);
+ bool mode_changed = intel_crtc_needs_modeset(crtc_state);
int ret;
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
@@ -13428,6 +10338,13 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_hdmi_infoframe_enable(DP_SDP_VSC))
intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
+ drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+ yesno(pipe_config->vrr.enable),
+ pipe_config->vrr.vmin, pipe_config->vrr.vmax,
+ pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
+ intel_vrr_vmin_vblank_start(pipe_config),
+ intel_vrr_vmax_vblank_start(pipe_config));
+
drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->hw.mode);
drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
@@ -13815,7 +10732,7 @@ encoder_retry:
return ret;
}
- if (ret == RETRY) {
+ if (ret == I915_DISPLAY_CONFIG_RETRY) {
if (drm_WARN(&i915->drm, !retry,
"loop in pipe configuration computation\n"))
return -EINVAL;
@@ -14421,6 +11338,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(mst_master_transcoder);
+ PIPE_CONF_CHECK_BOOL(vrr.enable);
+ PIPE_CONF_CHECK_I(vrr.vmin);
+ PIPE_CONF_CHECK_I(vrr.vmax);
+ PIPE_CONF_CHECK_I(vrr.flipline);
+ PIPE_CONF_CHECK_I(vrr.pipeline_full);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
@@ -14845,7 +11768,7 @@ intel_modeset_verify_crtc(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
+ if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
return;
verify_wm_state(crtc, new_crtc_state);
@@ -14879,10 +11802,17 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
+ struct drm_display_mode adjusted_mode =
+ crtc_state->hw.adjusted_mode;
+
+ if (crtc_state->vrr.enable) {
+ adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
+ crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+ }
- drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
+ drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
crtc->mode_flags = crtc_state->mode_flags;
@@ -14916,8 +11846,8 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
if (IS_GEN(dev_priv, 2)) {
int vtotal;
- vtotal = adjusted_mode->crtc_vtotal;
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal = adjusted_mode.crtc_vtotal;
+ if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
crtc->scanline_offset = vtotal - 1;
@@ -14940,7 +11870,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state)
return;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state))
+ if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
intel_release_shared_dplls(state, crtc);
@@ -14965,7 +11895,7 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
/* look at all crtc's that are going to be enabled in during modeset */
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->hw.active ||
- !needs_modeset(crtc_state))
+ !intel_crtc_needs_modeset(crtc_state))
continue;
if (first_crtc_state) {
@@ -14990,7 +11920,7 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
crtc_state->hsw_workaround_pipe = INVALID_PIPE;
if (!crtc_state->hw.active ||
- needs_modeset(crtc_state))
+ intel_crtc_needs_modeset(crtc_state))
continue;
/* 2 or more enabled crtcs means no need for w/a */
@@ -15102,6 +12032,19 @@ static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
return 0;
}
+int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ return intel_crtc_add_planes_to_state(state, crtc,
+ old_crtc_state->enabled_planes |
+ new_crtc_state->enabled_planes);
+}
+
static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
{
/* See {hsw,vlv,ivb}_plane_ratio() */
@@ -15296,7 +12239,7 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->hw.enable &&
transcoders & BIT(new_crtc_state->cpu_transcoder) &&
- needs_modeset(new_crtc_state))
+ intel_crtc_needs_modeset(new_crtc_state))
return true;
}
@@ -15317,7 +12260,7 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
slave = crtc;
master = old_crtc_state->bigjoiner_linked_crtc;
master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
- if (!master_crtc_state || !needs_modeset(master_crtc_state))
+ if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
goto claimed;
}
@@ -15355,21 +12298,16 @@ claimed:
return -EINVAL;
}
-static int kill_bigjoiner_slave(struct intel_atomic_state *state,
- struct intel_crtc_state *master_crtc_state)
+static void kill_bigjoiner_slave(struct intel_atomic_state *state,
+ struct intel_crtc_state *master_crtc_state)
{
struct intel_crtc_state *slave_crtc_state =
- intel_atomic_get_crtc_state(&state->base,
- master_crtc_state->bigjoiner_linked_crtc);
-
- if (IS_ERR(slave_crtc_state))
- return PTR_ERR(slave_crtc_state);
+ intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
- return 0;
}
/**
@@ -15382,7 +12320,7 @@ static int kill_bigjoiner_slave(struct intel_atomic_state *state,
* Async flip can only change the plane surface address, so anything else
* changing is rejected from the intel_atomic_check_async() function.
* Once this check is cleared, flip done interrupt is enabled using
- * the skl_enable_flip_done() function.
+ * the intel_crtc_enable_flip_done() function.
*
* As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to userspace in the interrupt
@@ -15401,7 +12339,7 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (needs_modeset(new_crtc_state)) {
+ if (intel_crtc_needs_modeset(new_crtc_state)) {
drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
return -EINVAL;
}
@@ -15426,7 +12364,7 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
* this(vlv/chv and icl+) should be added when async flip is
* enabled in the atomic IOCTL path.
*/
- if (plane->id != PLANE_PRIMARY)
+ if (!plane->async_flip)
return -EINVAL;
/*
@@ -15507,20 +12445,43 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
- const struct intel_crtc_state *crtc_state;
+ struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc_state *linked_crtc_state;
+ struct intel_crtc *linked_crtc;
+ int ret;
if (!crtc_state->bigjoiner)
continue;
- linked_crtc_state = intel_atomic_get_crtc_state(&state->base,
- crtc_state->bigjoiner_linked_crtc);
+ linked_crtc = crtc_state->bigjoiner_linked_crtc;
+ linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
if (IS_ERR(linked_crtc_state))
return PTR_ERR(linked_crtc_state);
+
+ if (!intel_crtc_needs_modeset(crtc_state))
+ continue;
+
+ linked_crtc_state->uapi.mode_changed = true;
+
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ &linked_crtc->base);
+ if (ret)
+ return ret;
+
+ ret = intel_atomic_add_affected_planes(state, linked_crtc);
+ if (ret)
+ return ret;
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ /* Kill old bigjoiner link, we may re-establish afterwards */
+ if (intel_crtc_needs_modeset(crtc_state) &&
+ crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
+ kill_bigjoiner_slave(state, crtc_state);
}
return 0;
@@ -15547,6 +12508,8 @@ static int intel_atomic_check(struct drm_device *dev,
new_crtc_state->uapi.mode_changed = true;
}
+ intel_vrr_check_modeset(state);
+
ret = drm_atomic_helper_check_modeset(dev, &state->base);
if (ret)
goto fail;
@@ -15557,20 +12520,13 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state)) {
+ if (!intel_crtc_needs_modeset(new_crtc_state)) {
/* Light copy */
intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
continue;
}
- /* Kill old bigjoiner link, we may re-establish afterwards */
- if (old_crtc_state->bigjoiner && !old_crtc_state->bigjoiner_slave) {
- ret = kill_bigjoiner_slave(state, new_crtc_state);
- if (ret)
- goto fail;
- }
-
if (!new_crtc_state->uapi.enable) {
if (!new_crtc_state->bigjoiner_slave) {
intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
@@ -15595,7 +12551,7 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state))
+ if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
ret = intel_modeset_pipe_config_late(new_crtc_state);
@@ -15617,7 +12573,7 @@ static int intel_atomic_check(struct drm_device *dev,
* forced a full modeset.
*/
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
+ if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
continue;
if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
@@ -15640,11 +12596,21 @@ static int intel_atomic_check(struct drm_device *dev,
new_crtc_state->update_pipe = false;
}
}
+
+ if (new_crtc_state->bigjoiner) {
+ struct intel_crtc_state *linked_crtc_state =
+ intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
+
+ if (intel_crtc_needs_modeset(linked_crtc_state)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
+ }
+ }
}
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (needs_modeset(new_crtc_state)) {
+ if (intel_crtc_needs_modeset(new_crtc_state)) {
any_ms = true;
continue;
}
@@ -15670,20 +12636,6 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- /*
- * distrust_bios_wm will force a full dbuf recomputation
- * but the hardware state will only get updated accordingly
- * if state->modeset==true. Hence distrust_bios_wm==true &&
- * state->modeset==false is an invalid combination which
- * would cause the hardware and software dbuf state to get
- * out of sync. We must prevent that.
- *
- * FIXME clean up this mess and introduce better
- * state tracking for dbuf.
- */
- if (dev_priv->wm.distrust_bios_wm)
- any_ms = true;
-
intel_fbc_choose_crtc(dev_priv, state);
ret = calc_watermark_data(state);
if (ret)
@@ -15721,12 +12673,12 @@ static int intel_atomic_check(struct drm_device *dev,
goto fail;
}
- if (!needs_modeset(new_crtc_state) &&
+ if (!intel_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->update_pipe)
continue;
intel_dump_pipe_config(new_crtc_state, state,
- needs_modeset(new_crtc_state) ?
+ intel_crtc_needs_modeset(new_crtc_state) ?
"[modeset]" : "[fastset]");
}
@@ -15758,7 +12710,7 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
return ret;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- bool mode_changed = needs_modeset(crtc_state);
+ bool mode_changed = intel_crtc_needs_modeset(crtc_state);
if (mode_changed || crtc_state->update_pipe ||
crtc_state->uapi.color_mgmt_changed) {
@@ -15769,17 +12721,6 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
return 0;
}
-u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
-
- if (!vblank->max_vblank_count)
- return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
-
- return crtc->base.funcs->get_vblank_counter(&crtc->base);
-}
-
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
@@ -15849,7 +12790,7 @@ static void commit_pipe_config(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- bool modeset = needs_modeset(new_crtc_state);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
/*
* During modesets pipe configuration was programmed as the
@@ -15883,7 +12824,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (!needs_modeset(new_crtc_state))
+ if (!intel_crtc_needs_modeset(new_crtc_state))
return;
intel_crtc_update_active_timings(new_crtc_state);
@@ -15905,7 +12846,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- bool modeset = needs_modeset(new_crtc_state);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
if (!modeset) {
if (new_crtc_state->preload_luts &&
@@ -15997,7 +12938,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
/* Only disable port sync and MST slaves */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
+ if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
continue;
if (!old_crtc_state->hw.active)
@@ -16021,7 +12962,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
/* Disable everything else left on */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state) ||
+ if (!intel_crtc_needs_modeset(new_crtc_state) ||
(handled & BIT(crtc->pipe)) ||
old_crtc_state->bigjoiner_slave)
continue;
@@ -16071,7 +13012,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
continue;
/* ignore allocations for crtc's that have been turned off. */
- if (!needs_modeset(new_crtc_state)) {
+ if (!intel_crtc_needs_modeset(new_crtc_state)) {
entries[pipe] = old_crtc_state->wm.skl.ddb;
update_pipes |= BIT(pipe);
} else {
@@ -16247,6 +13188,43 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
intel_atomic_helper_free_state(i915);
}
+static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_plane *plane;
+ struct intel_plane_state *plane_state;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ int ret;
+
+ if (!fb ||
+ fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+ continue;
+
+ /*
+ * The layout of the fast clear color value expected by HW
+ * (the DRM ABI requires this value to be located in the fb at offset 0 of plane #2):
+ * - 4 x 4 bytes per-channel value
+ * (in surface type specific float/int format provided by the fb user)
+ * - 8 bytes native color value used by the display
+ * (converted/written by GPU during a fast clear operation using the
+ * above per-channel values)
+ *
+ * The commit's FB prepare hook has already ensured that the FB obj is pinned,
+ * and the caller has made sure that the object is synced wrt. the GPU write of
+ * the related clear color value.
+ */
+ ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
+ fb->offsets[2] + 16,
+ &plane_state->ccval,
+ sizeof(plane_state->ccval));
+ /* The above could only fail if the FB obj has an unexpected backing store type. */
+ drm_WARN_ON(&i915->drm, ret);
+ }
+}
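The comment in the helper above describes the clear color data that i915_gem_object_read_from_page() pulls out of the framebuffer: 16 bytes of per-channel values followed by the 8-byte value the display hardware consumes, which is why the read starts at fb->offsets[2] + 16. A minimal sketch of that layout, purely illustrative and not a structure defined anywhere in the kernel:

/* Illustrative only: layout of the CCS clear color plane (plane #2, offset 0) */
struct illustrative_clear_color {
	u32 per_channel[4];	/* 4 x 4-byte per-channel values from the fb user */
	u64 hw_value;		/* 8-byte native value written by the GPU fast clear */
};
/* The read above effectively fetches hw_value, i.e. offsetof(..., hw_value) == 16. */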
+
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
struct drm_device *dev = state->base.dev;
@@ -16264,9 +13242,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+ intel_atomic_prepare_plane_clear_colors(state);
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (needs_modeset(new_crtc_state) ||
+ if (intel_crtc_needs_modeset(new_crtc_state) ||
new_crtc_state->update_pipe) {
put_domains[crtc->pipe] =
@@ -16292,7 +13272,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
/* Complete the events for pipes that have now been disabled */
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- bool modeset = needs_modeset(new_crtc_state);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
/* Complete events for now-disabled pipes here. */
if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
@@ -16312,7 +13292,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip)
- skl_enable_flip_done(crtc);
+ intel_crtc_enable_flip_done(state, crtc);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -16337,10 +13317,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip)
- skl_disable_flip_done(crtc);
+ intel_crtc_disable_flip_done(state, crtc);
if (new_crtc_state->hw.active &&
- !needs_modeset(new_crtc_state) &&
+ !intel_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->preload_luts &&
(new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe))
@@ -16376,8 +13356,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
- if (put_domains[i])
- modeset_put_power_domains(dev_priv, put_domains[i]);
+ modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
@@ -16541,7 +13520,6 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
- dev_priv->wm.distrust_bios_wm = false;
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
@@ -16620,7 +13598,7 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
-static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -16650,7 +13628,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
return 0;
}
-static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
{
struct i915_vma *vma;
@@ -16659,15 +13637,6 @@ static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
intel_unpin_fb_vma(vma, old_plane_state->flags);
}
-static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
-{
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
- };
-
- i915_gem_object_wait_priority(obj, 0, &attr);
-}
-
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @_plane: drm plane to prepare for
@@ -16684,6 +13653,9 @@ int
intel_prepare_plane_fb(struct drm_plane *_plane,
struct drm_plane_state *_new_plane_state)
{
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
+ };
struct intel_plane *plane = to_intel_plane(_plane);
struct intel_plane_state *new_plane_state =
to_intel_plane_state(_new_plane_state);
@@ -16712,7 +13684,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- if (needs_modeset(crtc_state)) {
+ if (intel_crtc_needs_modeset(crtc_state)) {
ret = i915_sw_fence_await_reservation(&state->commit_ready,
old_obj->base.resv, NULL,
false, 0,
@@ -16723,6 +13695,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
}
if (new_plane_state->uapi.fence) { /* explicit fencing */
+ i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
+ &attr);
ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
new_plane_state->uapi.fence,
i915_fence_timeout(dev_priv),
@@ -16744,7 +13718,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (ret)
return ret;
- fb_obj_bump_render_priority(obj);
+ i915_gem_object_wait_priority(obj, 0, &attr);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
if (!new_plane_state->uapi.fence) { /* implicit fencing */
@@ -16833,540 +13807,6 @@ void intel_plane_destroy(struct drm_plane *plane)
kfree(to_intel_plane(plane));
}
-static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
- return false;
- }
-
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XRGB8888:
- return modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED;
- default:
- return false;
- }
-}
-
-static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
- return false;
- }
-
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_XBGR16161616F:
- return modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED;
- default:
- return false;
- }
-}
-
-static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- return modifier == DRM_FORMAT_MOD_LINEAR &&
- format == DRM_FORMAT_ARGB8888;
-}
-
-static const struct drm_plane_funcs i965_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = i965_plane_format_mod_supported,
-};
-
-static const struct drm_plane_funcs i8xx_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = i8xx_plane_format_mod_supported,
-};
-
-static int
-intel_legacy_cursor_update(struct drm_plane *_plane,
- struct drm_crtc *_crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- u32 src_x, u32 src_y,
- u32 src_w, u32 src_h,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct intel_plane *plane = to_intel_plane(_plane);
- struct intel_crtc *crtc = to_intel_crtc(_crtc);
- struct intel_plane_state *old_plane_state =
- to_intel_plane_state(plane->base.state);
- struct intel_plane_state *new_plane_state;
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_crtc_state *new_crtc_state;
- int ret;
-
- /*
- * When crtc is inactive or there is a modeset pending,
- * wait for it to complete in the slowpath
- *
- * FIXME bigjoiner fastpath would be good
- */
- if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
- crtc_state->update_pipe || crtc_state->bigjoiner)
- goto slow;
-
- /*
- * Don't do an async update if there is an outstanding commit modifying
- * the plane. This prevents our async update's changes from getting
- * overridden by a previous synchronous update's state.
- */
- if (old_plane_state->uapi.commit &&
- !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
- goto slow;
-
- /*
- * If any parameters change that may affect watermarks,
- * take the slowpath. Only changing fb or position should be
- * in the fastpath.
- */
- if (old_plane_state->uapi.crtc != &crtc->base ||
- old_plane_state->uapi.src_w != src_w ||
- old_plane_state->uapi.src_h != src_h ||
- old_plane_state->uapi.crtc_w != crtc_w ||
- old_plane_state->uapi.crtc_h != crtc_h ||
- !old_plane_state->uapi.fb != !fb)
- goto slow;
-
- new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
- if (!new_plane_state)
- return -ENOMEM;
-
- new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
- if (!new_crtc_state) {
- ret = -ENOMEM;
- goto out_free;
- }
-
- drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
-
- new_plane_state->uapi.src_x = src_x;
- new_plane_state->uapi.src_y = src_y;
- new_plane_state->uapi.src_w = src_w;
- new_plane_state->uapi.src_h = src_h;
- new_plane_state->uapi.crtc_x = crtc_x;
- new_plane_state->uapi.crtc_y = crtc_y;
- new_plane_state->uapi.crtc_w = crtc_w;
- new_plane_state->uapi.crtc_h = crtc_h;
-
- intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc);
-
- ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
- old_plane_state, new_plane_state);
- if (ret)
- goto out_free;
-
- ret = intel_plane_pin_fb(new_plane_state);
- if (ret)
- goto out_free;
-
- intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
- ORIGIN_FLIP);
- intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
- to_intel_frontbuffer(new_plane_state->hw.fb),
- plane->frontbuffer_bit);
-
- /* Swap plane state */
- plane->base.state = &new_plane_state->uapi;
-
- /*
- * We cannot swap crtc_state as it may be in use by an atomic commit or
- * page flip that's running simultaneously. If we swap crtc_state and
- * destroy the old state, we will cause a use-after-free there.
- *
- * Only update active_planes, which is needed for our internal
- * bookkeeping. Either value will do the right thing when updating
- * planes atomically. If the cursor was part of the atomic update then
- * we would have taken the slowpath.
- */
- crtc_state->active_planes = new_crtc_state->active_planes;
-
- if (new_plane_state->uapi.visible)
- intel_update_plane(plane, crtc_state, new_plane_state);
- else
- intel_disable_plane(plane, crtc_state);
-
- intel_plane_unpin_fb(old_plane_state);
-
-out_free:
- if (new_crtc_state)
- intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
- if (ret)
- intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
- else
- intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
- return ret;
-
-slow:
- return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h, ctx);
-}
-
-static const struct drm_plane_funcs intel_cursor_plane_funcs = {
- .update_plane = intel_legacy_cursor_update,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_cursor_format_mod_supported,
-};
-
-static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
- enum i9xx_plane_id i9xx_plane)
-{
- if (!HAS_FBC(dev_priv))
- return false;
-
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return i9xx_plane == PLANE_A; /* tied to pipe A */
- else if (IS_IVYBRIDGE(dev_priv))
- return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
- i9xx_plane == PLANE_C;
- else if (INTEL_GEN(dev_priv) >= 4)
- return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
- else
- return i9xx_plane == PLANE_A;
-}
-
-static struct intel_plane *
-intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- struct intel_plane *plane;
- const struct drm_plane_funcs *plane_funcs;
- unsigned int supported_rotations;
- const u32 *formats;
- int num_formats;
- int ret, zpos;
-
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_universal_plane_create(dev_priv, pipe,
- PLANE_PRIMARY);
-
- plane = intel_plane_alloc();
- if (IS_ERR(plane))
- return plane;
-
- plane->pipe = pipe;
- /*
- * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
- * port is hooked to pipe B. Hence we want plane A feeding pipe B.
- */
- if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
- INTEL_NUM_PIPES(dev_priv) == 2)
- plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
- else
- plane->i9xx_plane = (enum i9xx_plane_id) pipe;
- plane->id = PLANE_PRIMARY;
- plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
-
- plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
- if (plane->has_fbc) {
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
- }
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- formats = vlv_primary_formats;
- num_formats = ARRAY_SIZE(vlv_primary_formats);
- } else if (INTEL_GEN(dev_priv) >= 4) {
- /*
- * WaFP16GammaEnabling:ivb
- * "Workaround : When using the 64-bit format, the plane
- * output on each color channel has one quarter amplitude.
- * It can be brought up to full amplitude by using pipe
- * gamma correction or pipe color space conversion to
- * multiply the plane output by four."
- *
- * There is no dedicated plane gamma for the primary plane,
- * and using the pipe gamma/csc could conflict with other
- * planes, so we choose not to expose fp16 on IVB primary
- * planes. HSW primary planes no longer have this problem.
- */
- if (IS_IVYBRIDGE(dev_priv)) {
- formats = ivb_primary_formats;
- num_formats = ARRAY_SIZE(ivb_primary_formats);
- } else {
- formats = i965_primary_formats;
- num_formats = ARRAY_SIZE(i965_primary_formats);
- }
- } else {
- formats = i8xx_primary_formats;
- num_formats = ARRAY_SIZE(i8xx_primary_formats);
- }
-
- if (INTEL_GEN(dev_priv) >= 4)
- plane_funcs = &i965_plane_funcs;
- else
- plane_funcs = &i8xx_plane_funcs;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- plane->min_cdclk = vlv_plane_min_cdclk;
- else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- plane->min_cdclk = hsw_plane_min_cdclk;
- else if (IS_IVYBRIDGE(dev_priv))
- plane->min_cdclk = ivb_plane_min_cdclk;
- else
- plane->min_cdclk = i9xx_plane_min_cdclk;
-
- plane->max_stride = i9xx_plane_max_stride;
- plane->update_plane = i9xx_update_plane;
- plane->disable_plane = i9xx_disable_plane;
- plane->get_hw_state = i9xx_plane_get_hw_state;
- plane->check_plane = i9xx_plane_check;
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- 0, plane_funcs,
- formats, num_formats,
- i9xx_format_modifiers,
- DRM_PLANE_TYPE_PRIMARY,
- "primary %c", pipe_name(pipe));
- else
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- 0, plane_funcs,
- formats, num_formats,
- i9xx_format_modifiers,
- DRM_PLANE_TYPE_PRIMARY,
- "plane %c",
- plane_name(plane->i9xx_plane));
- if (ret)
- goto fail;
-
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
- DRM_MODE_REFLECT_X;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
- } else {
- supported_rotations = DRM_MODE_ROTATE_0;
- }
-
- if (INTEL_GEN(dev_priv) >= 4)
- drm_plane_create_rotation_property(&plane->base,
- DRM_MODE_ROTATE_0,
- supported_rotations);
-
- zpos = 0;
- drm_plane_create_zpos_immutable_property(&plane->base, zpos);
-
- drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
-
- return plane;
-
-fail:
- intel_plane_free(plane);
-
- return ERR_PTR(ret);
-}
-
-static struct intel_plane *
-intel_cursor_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- struct intel_plane *cursor;
- int ret, zpos;
-
- cursor = intel_plane_alloc();
- if (IS_ERR(cursor))
- return cursor;
-
- cursor->pipe = pipe;
- cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
- cursor->id = PLANE_CURSOR;
- cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
-
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
- cursor->max_stride = i845_cursor_max_stride;
- cursor->update_plane = i845_update_cursor;
- cursor->disable_plane = i845_disable_cursor;
- cursor->get_hw_state = i845_cursor_get_hw_state;
- cursor->check_plane = i845_check_cursor;
- } else {
- cursor->max_stride = i9xx_cursor_max_stride;
- cursor->update_plane = i9xx_update_cursor;
- cursor->disable_plane = i9xx_disable_cursor;
- cursor->get_hw_state = i9xx_cursor_get_hw_state;
- cursor->check_plane = i9xx_check_cursor;
- }
-
- cursor->cursor.base = ~0;
- cursor->cursor.cntl = ~0;
-
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
- cursor->cursor.size = ~0;
-
- ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
- 0, &intel_cursor_plane_funcs,
- intel_cursor_formats,
- ARRAY_SIZE(intel_cursor_formats),
- cursor_format_modifiers,
- DRM_PLANE_TYPE_CURSOR,
- "cursor %c", pipe_name(pipe));
- if (ret)
- goto fail;
-
- if (INTEL_GEN(dev_priv) >= 4)
- drm_plane_create_rotation_property(&cursor->base,
- DRM_MODE_ROTATE_0,
- DRM_MODE_ROTATE_0 |
- DRM_MODE_ROTATE_180);
-
- zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
- drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
-
- if (INTEL_GEN(dev_priv) >= 12)
- drm_plane_enable_fb_damage_clips(&cursor->base);
-
- drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
-
- return cursor;
-
-fail:
- intel_plane_free(cursor);
-
- return ERR_PTR(ret);
-}
-
-#define INTEL_CRTC_FUNCS \
- .gamma_set = drm_atomic_helper_legacy_gamma_set, \
- .set_config = drm_atomic_helper_set_config, \
- .destroy = intel_crtc_destroy, \
- .page_flip = drm_atomic_helper_page_flip, \
- .atomic_duplicate_state = intel_crtc_duplicate_state, \
- .atomic_destroy_state = intel_crtc_destroy_state, \
- .set_crc_source = intel_crtc_set_crc_source, \
- .verify_crc_source = intel_crtc_verify_crc_source, \
- .get_crc_sources = intel_crtc_get_crc_sources
-
-static const struct drm_crtc_funcs bdw_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = g4x_get_vblank_counter,
- .enable_vblank = bdw_enable_vblank,
- .disable_vblank = bdw_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs ilk_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = g4x_get_vblank_counter,
- .enable_vblank = ilk_enable_vblank,
- .disable_vblank = ilk_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs g4x_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = g4x_get_vblank_counter,
- .enable_vblank = i965_enable_vblank,
- .disable_vblank = i965_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i965_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i965_enable_vblank,
- .disable_vblank = i965_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i915gm_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i915gm_enable_vblank,
- .disable_vblank = i915gm_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i915_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i8xx_enable_vblank,
- .disable_vblank = i8xx_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i8xx_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- /* no hw vblank counter */
- .enable_vblank = i8xx_enable_vblank,
- .disable_vblank = i8xx_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static struct intel_crtc *intel_crtc_alloc(void)
-{
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
-
- crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
- if (!crtc)
- return ERR_PTR(-ENOMEM);
-
- crtc_state = intel_crtc_state_alloc(crtc);
- if (!crtc_state) {
- kfree(crtc);
- return ERR_PTR(-ENOMEM);
- }
-
- crtc->base.state = &crtc_state->uapi;
- crtc->config = crtc_state;
-
- return crtc;
-}
-
-static void intel_crtc_free(struct intel_crtc *crtc)
-{
- intel_crtc_destroy_state(&crtc->base, crtc->base.state);
- kfree(crtc);
-}
-
static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
{
struct intel_plane *plane;
@@ -17379,100 +13819,6 @@ static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
}
}
-static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- struct intel_plane *primary, *cursor;
- const struct drm_crtc_funcs *funcs;
- struct intel_crtc *crtc;
- int sprite, ret;
-
- crtc = intel_crtc_alloc();
- if (IS_ERR(crtc))
- return PTR_ERR(crtc);
-
- crtc->pipe = pipe;
- crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
-
- primary = intel_primary_plane_create(dev_priv, pipe);
- if (IS_ERR(primary)) {
- ret = PTR_ERR(primary);
- goto fail;
- }
- crtc->plane_ids_mask |= BIT(primary->id);
-
- for_each_sprite(dev_priv, pipe, sprite) {
- struct intel_plane *plane;
-
- plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
- if (IS_ERR(plane)) {
- ret = PTR_ERR(plane);
- goto fail;
- }
- crtc->plane_ids_mask |= BIT(plane->id);
- }
-
- cursor = intel_cursor_plane_create(dev_priv, pipe);
- if (IS_ERR(cursor)) {
- ret = PTR_ERR(cursor);
- goto fail;
- }
- crtc->plane_ids_mask |= BIT(cursor->id);
-
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
- funcs = &g4x_crtc_funcs;
- else if (IS_GEN(dev_priv, 4))
- funcs = &i965_crtc_funcs;
- else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
- funcs = &i915gm_crtc_funcs;
- else if (IS_GEN(dev_priv, 3))
- funcs = &i915_crtc_funcs;
- else
- funcs = &i8xx_crtc_funcs;
- } else {
- if (INTEL_GEN(dev_priv) >= 8)
- funcs = &bdw_crtc_funcs;
- else
- funcs = &ilk_crtc_funcs;
- }
-
- ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
- &primary->base, &cursor->base,
- funcs, "pipe %c", pipe_name(pipe));
- if (ret)
- goto fail;
-
- BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
- dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
- dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
-
- if (INTEL_GEN(dev_priv) < 9) {
- enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
-
- BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
- dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
- dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
- }
-
- if (INTEL_GEN(dev_priv) >= 10)
- drm_crtc_create_scaling_filter_property(&crtc->base,
- BIT(DRM_SCALING_FILTER_DEFAULT) |
- BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
-
- intel_color_init(crtc);
-
- intel_crtc_crc_init(crtc);
-
- drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
-
- return 0;
-
-fail:
- intel_crtc_free(crtc);
-
- return ret;
-}
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -17555,48 +13901,12 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
return true;
}
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
-{
- int pps_num;
- int pps_idx;
-
- if (HAS_DDI(dev_priv))
- return;
- /*
- * This w/a is needed at least on CPT/PPT, but to be sure apply it
- * everywhere where registers can be write protected.
- */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pps_num = 2;
- else
- pps_num = 1;
-
- for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
- u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
-
- val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
- intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
- }
-}
-
-static void intel_pps_init(struct drm_i915_private *dev_priv)
-{
- if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
- dev_priv->pps_mmio_base = PCH_PPS_BASE;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->pps_mmio_base = VLV_PPS_BASE;
- else
- dev_priv->pps_mmio_base = PPS_BASE;
-
- intel_pps_unlock_regs_wa(dev_priv);
-}
-
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
bool dpd_is_edp = false;
- intel_pps_init(dev_priv);
+ intel_pps_unlock_regs_wa(dev_priv);
if (!HAS_DISPLAY(dev_priv))
return;
@@ -17997,7 +14307,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
- if (is_gen12_ccs_plane(fb, i)) {
+ if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
if (fb->pitches[i] != ccs_aux_stride) {
@@ -18195,86 +14505,40 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
intel_init_cdclk_hooks(dev_priv);
+ intel_dpll_init_clock_hook(dev_priv);
+
if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->display.get_pipe_config = hsw_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- skl_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
dev_priv->display.crtc_enable = hsw_crtc_enable;
dev_priv->display.crtc_disable = hsw_crtc_disable;
} else if (HAS_DDI(dev_priv)) {
dev_priv->display.get_pipe_config = hsw_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock =
- hsw_crtc_compute_clock;
dev_priv->display.crtc_enable = hsw_crtc_enable;
dev_priv->display.crtc_disable = hsw_crtc_disable;
} else if (HAS_PCH_SPLIT(dev_priv)) {
dev_priv->display.get_pipe_config = ilk_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock =
- ilk_crtc_compute_clock;
dev_priv->display.crtc_enable = ilk_crtc_enable;
dev_priv->display.crtc_disable = ilk_crtc_disable;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
- dev_priv->display.crtc_enable = valleyview_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (IS_CHERRYVIEW(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (IS_G4X(dev_priv)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
- dev_priv->display.crtc_enable = i9xx_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (IS_PINEVIEW(dev_priv)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
- dev_priv->display.crtc_enable = i9xx_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (!IS_GEN(dev_priv, 2)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
- dev_priv->display.crtc_enable = i9xx_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
}
- if (IS_GEN(dev_priv, 5)) {
- dev_priv->display.fdi_link_train = ilk_fdi_link_train;
- } else if (IS_GEN(dev_priv, 6)) {
- dev_priv->display.fdi_link_train = gen6_fdi_link_train;
- } else if (IS_IVYBRIDGE(dev_priv)) {
- /* FIXME: detect B0+ stepping and use auto training */
- dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- }
+ intel_fdi_init_hook(dev_priv);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
- else
+ dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
+ } else {
dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
+ dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
+ }
}
@@ -18282,14 +14546,10 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
{
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(i915->cdclk.obj.state);
- struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->dbuf.obj.state);
intel_update_cdclk(i915);
intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
-
- dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
}
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
@@ -18522,8 +14782,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->funcs = &intel_mode_funcs;
- if (INTEL_GEN(i915) >= 9)
- mode_config->async_page_flip = true;
+ mode_config->async_page_flip = has_async_flips(i915);
/*
* Maximum framebuffer dimensions, chosen to match
@@ -18608,6 +14867,8 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+ i915->framestart_delay = 1; /* 1-4 */
+
intel_mode_config_init(i915);
ret = intel_cdclk_init(i915);
@@ -18654,6 +14915,8 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
intel_panel_sanitize_ssc(i915);
+ intel_pps_setup(i915);
+
intel_gmbus_setup(i915);
drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
@@ -18942,7 +15205,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~HSW_FRAME_START_DELAY_MASK;
- val |= HSW_FRAME_START_DELAY(0);
+ val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
} else {
i915_reg_t reg = PIPECONF(cpu_transcoder);
@@ -18950,7 +15213,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~PIPECONF_FRAME_START_DELAY_MASK;
- val |= PIPECONF_FRAME_START_DELAY(0);
+ val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
@@ -18963,7 +15226,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~TRANS_FRAME_START_DELAY_MASK;
- val |= TRANS_FRAME_START_DELAY(0);
+ val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
} else {
enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
@@ -18972,7 +15235,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
}
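All four hunks in intel_sanitize_frame_start_delay() program the same user-visible setting: the new framestart_delay field holds a value in the 1-4 range (initialised to 1 earlier in this patch), while the hardware fields store it minus one, so the default still writes the 0 these registers were previously hard-coded to. A one-line sketch, shown only to make the -1 mapping explicit (the helper name is made up):

/* Illustrative: framestart_delay 1..4 maps to a register field of 0..3 */
static inline u32 frame_start_delay_field(const struct drm_i915_private *i915)
{
	return i915->framestart_delay - 1;	/* default 1 -> field value 0 */
}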
@@ -19165,7 +15428,7 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- fixup_active_planes(crtc_state);
+ fixup_plane_bitmasks(crtc_state);
}
}
@@ -19588,7 +15851,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
put_domains = modeset_get_crtc_power_domains(crtc_state);
if (drm_WARN_ON(dev, put_domains))
- modeset_put_power_domains(dev_priv, put_domains);
+ modeset_put_crtc_power_domains(crtc, put_domains);
}
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 5e0d42d82c11..64ffa34544a7 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -499,6 +499,8 @@ enum phy_fia {
((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
(new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
+int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes);
void intel_link_compute_m_n(u16 bpp, int nlanes,
@@ -544,7 +546,6 @@ unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info
unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
int intel_display_suspend(struct drm_device *dev);
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
@@ -628,11 +629,9 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int plane);
int skl_check_plane_surface(struct intel_plane_state *plane_state);
-int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+ int *x, int *y, u32 *offset);
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
-unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
@@ -643,7 +642,17 @@ void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
- uint64_t modifier);
+ u64 modifier);
+
+int intel_plane_compute_gtt(struct intel_plane_state *plane_state);
+u32 intel_plane_compute_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane);
+int intel_plane_pin_fb(struct intel_plane_state *plane_state);
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
+struct intel_encoder *
+intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state);
/* modesetting */
void intel_modeset_init_hw(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index ca41e8c00ad7..d62b18d5ecd8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -18,6 +18,7 @@
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sideband.h"
+#include "intel_sprite.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -865,6 +866,110 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
}
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+static void crtc_updates_info(struct seq_file *m,
+ struct intel_crtc *crtc,
+ const char *hdr)
+{
+ u64 count;
+ int row;
+
+ count = 0;
+ for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
+ count += crtc->debug.vbl.times[row];
+ seq_printf(m, "%sUpdates: %llu\n", hdr, count);
+ if (!count)
+ return;
+
+ for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
+ char columns[80] = " |";
+ unsigned int x;
+
+ if (row & 1) {
+ const char *units;
+
+ if (row > 10) {
+ x = 1000000;
+ units = "ms";
+ } else {
+ x = 1000;
+ units = "us";
+ }
+
+ snprintf(columns, sizeof(columns), "%4ld%s |",
+ DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
+ }
+
+ if (crtc->debug.vbl.times[row]) {
+ x = ilog2(crtc->debug.vbl.times[row]);
+ memset(columns + 8, '*', x);
+ columns[8 + x] = '\0';
+ }
+
+ seq_printf(m, "%s%s\n", hdr, columns);
+ }
+
+ seq_printf(m, "%sMin update: %lluns\n",
+ hdr, crtc->debug.vbl.min);
+ seq_printf(m, "%sMax update: %lluns\n",
+ hdr, crtc->debug.vbl.max);
+ seq_printf(m, "%sAverage update: %lluns\n",
+ hdr, div64_u64(crtc->debug.vbl.sum, count));
+ seq_printf(m, "%sOverruns > %uus: %u\n",
+ hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
+}
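The label printed next to every other histogram row above is BIT(row + 9) nanoseconds, converted to microseconds for the lower rows and to milliseconds once row exceeds 10. A small sketch of that bucket-boundary calculation, restating what the snprintf() call does rather than adding behaviour:

/* Illustrative: the boundary printed for a histogram row, in nanoseconds */
static inline u64 crtc_update_bucket_ns(int row)
{
	return BIT_ULL(row + 9);	/* row 1 -> 1024 ns, row 11 -> ~1.05 ms */
}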
+
+static int crtc_updates_show(struct seq_file *m, void *data)
+{
+ crtc_updates_info(m, m->private, "");
+ return 0;
+}
+
+static int crtc_updates_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, crtc_updates_show, inode->i_private);
+}
+
+static ssize_t crtc_updates_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_crtc *crtc = m->private;
+
+ /* May race with an update. Meh. */
+ memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));
+
+ return len;
+}
+
+static const struct file_operations crtc_updates_fops = {
+ .owner = THIS_MODULE,
+ .open = crtc_updates_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = crtc_updates_write
+};
+
+static void crtc_updates_add(struct drm_crtc *crtc)
+{
+ debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
+ to_intel_crtc(crtc), &crtc_updates_fops);
+}
+
+#else
+static void crtc_updates_info(struct seq_file *m,
+ struct intel_crtc *crtc,
+ const char *hdr)
+{
+}
+
+static void crtc_updates_add(struct drm_crtc *crtc)
+{
+}
+#endif
+
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -907,6 +1012,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
yesno(!crtc->cpu_fifo_underrun_disabled),
yesno(!crtc->pch_fifo_underrun_disabled));
+
+ crtc_updates_info(m, crtc, "\t");
}
static int i915_display_info(struct seq_file *m, void *unused)
@@ -1032,7 +1139,6 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
if (!dev_priv->ipc_enabled && enable)
drm_info(&dev_priv->drm,
"Enabling IPC: WM will be proper only after next commit\n");
- dev_priv->wm.distrust_bios_wm = true;
dev_priv->ipc_enabled = enable;
intel_enable_ipc(dev_priv);
}
@@ -2048,13 +2154,13 @@ static int i915_panel_show(struct seq_file *m, void *data)
return -ENODEV;
seq_printf(m, "Panel power up delay: %d\n",
- intel_dp->panel_power_up_delay);
+ intel_dp->pps.panel_power_up_delay);
seq_printf(m, "Panel power down delay: %d\n",
- intel_dp->panel_power_down_delay);
+ intel_dp->pps.panel_power_down_delay);
seq_printf(m, "Backlight on delay: %d\n",
- intel_dp->backlight_on_delay);
+ intel_dp->pps.backlight_on_delay);
seq_printf(m, "Backlight off delay: %d\n",
- intel_dp->backlight_off_delay);
+ intel_dp->pps.backlight_off_delay);
return 0;
}
@@ -2278,3 +2384,20 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
return 0;
}
+
+/**
+ * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
+ * @crtc: pointer to a drm_crtc
+ *
+ * Returns 0 on success, negative error codes on error.
+ *
+ * Failure to add debugfs entries should generally be ignored.
+ */
+int intel_crtc_debugfs_add(struct drm_crtc *crtc)
+{
+ if (!crtc->debugfs_entry)
+ return -ENODEV;
+
+ crtc_updates_add(crtc);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index c922c1745bfe..557901f3eb90 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -7,14 +7,17 @@
#define __INTEL_DISPLAY_DEBUGFS_H__
struct drm_connector;
+struct drm_crtc;
struct drm_i915_private;
#ifdef CONFIG_DEBUG_FS
void intel_display_debugfs_register(struct drm_i915_private *i915);
int intel_connector_debugfs_add(struct drm_connector *connector);
+int intel_crtc_debugfs_add(struct drm_crtc *crtc);
#else
static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
+static inline int intel_crtc_debugfs_add(struct drm_crtc *crtc) { return 0; }
#endif
#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index fe2d90bba536..c11c37c65d86 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -4,7 +4,6 @@
*/
#include "display/intel_crt.h"
-#include "display/intel_dp.h"
#include "i915_drv.h"
#include "i915_irq.h"
@@ -16,6 +15,7 @@
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_pm.h"
+#include "intel_pps.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"
@@ -936,7 +936,7 @@ static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
* because PPS registers are always on.
*/
if (!HAS_PCH_SPLIT(dev_priv))
- intel_power_sequencer_reset(dev_priv);
+ intel_pps_reset_all(dev_priv);
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
@@ -1446,7 +1446,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
- intel_power_sequencer_reset(dev_priv);
+ intel_pps_reset_all(dev_priv);
/* Prevent us from re-enabling polling on accident in late suspend */
if (!dev_priv->drm.dev->power.is_suspended)
@@ -2184,26 +2184,6 @@ static void __intel_display_power_put(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
}
-/**
- * intel_display_power_put_unchecked - release an unchecked power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function drops the power domain reference obtained by
- * intel_display_power_get() and might power down the corresponding hardware
- * block right away if this is the last reference.
- *
- * This function exists only for historical reasons and should be avoided in
- * new code, as the correctness of its use cannot be checked. Always use
- * intel_display_power_put() instead.
- */
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- __intel_display_power_put(dev_priv, domain);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
-}
-
static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
intel_wakeref_t wakeref)
@@ -2410,8 +2390,85 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
__intel_display_power_put(dev_priv, domain);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
+#else
+/**
+ * intel_display_power_put_unchecked - release an unchecked power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ *
+ * This function is only for the power domain code's internal use, to suppress
+ * wakeref tracking when the corresponding debug kconfig option is disabled; it
+ * should not be used otherwise.
+ */
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ __intel_display_power_put(dev_priv, domain);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+}
#endif
+void
+intel_display_power_get_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain)
+{
+ intel_wakeref_t __maybe_unused wf;
+
+ drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+
+ wf = intel_display_power_get(i915, domain);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ power_domain_set->wakerefs[domain] = wf;
+#endif
+ power_domain_set->mask |= BIT_ULL(domain);
+}
+
+bool
+intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain)
+{
+ intel_wakeref_t wf;
+
+ drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+
+ wf = intel_display_power_get_if_enabled(i915, domain);
+ if (!wf)
+ return false;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ power_domain_set->wakerefs[domain] = wf;
+#endif
+ power_domain_set->mask |= BIT_ULL(domain);
+
+ return true;
+}
+
+void
+intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ u64 mask)
+{
+ enum intel_display_power_domain domain;
+
+ drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask);
+
+ for_each_power_domain(domain, mask) {
+ intel_wakeref_t __maybe_unused wf = -1;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
+#endif
+ intel_display_power_put(i915, domain, wf);
+ power_domain_set->mask &= ~BIT_ULL(domain);
+ }
+}
+
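Together with intel_display_power_get_in_set() above and the intel_display_power_put_all_in_set() inline added to the header later in this patch, these helpers let a caller track a whole group of domain references (and, with the debug config, their wakerefs) in one struct. A rough usage sketch under those assumptions; the domains chosen here are only examples:

struct intel_display_power_domain_set power_domain_set = {};

intel_display_power_get_in_set(i915, &power_domain_set, POWER_DOMAIN_PIPE_A);

if (intel_display_power_get_in_set_if_enabled(i915, &power_domain_set,
					       POWER_DOMAIN_INIT)) {
	/* ... read out state guarded by the extra domain ... */
}

/* Drops every reference recorded in the set, wakerefs included. */
intel_display_power_put_all_in_set(i915, &power_domain_set);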
#define I830_PIPES_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PIPE_A) | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
@@ -5601,12 +5658,16 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
* resources powered until display HW readout is complete. We drop
* this reference in intel_power_domains_enable().
*/
- power_domains->wakeref =
+ drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
- if (!i915->params.disable_power_well)
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ if (!i915->params.disable_power_well) {
+ drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
+ i915->power_domains.disable_wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_INIT);
+ }
intel_power_domains_sync_hw(i915);
power_domains->initializing = false;
@@ -5626,11 +5687,12 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.wakeref);
+ fetch_and_zero(&i915->power_domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
if (!i915->params.disable_power_well)
- intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+ intel_display_power_put(i915, POWER_DOMAIN_INIT,
+ fetch_and_zero(&i915->power_domains.disable_wakeref));
intel_display_power_flush_work_sync(i915);
@@ -5655,7 +5717,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
void intel_power_domains_enable(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.wakeref);
+ fetch_and_zero(&i915->power_domains.init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
intel_power_domains_verify_state(i915);
@@ -5672,8 +5734,8 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->power_domains;
- drm_WARN_ON(&i915->drm, power_domains->wakeref);
- power_domains->wakeref =
+ drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
intel_power_domains_verify_state(i915);
@@ -5695,7 +5757,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
{
struct i915_power_domains *power_domains = &i915->power_domains;
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&power_domains->wakeref);
+ fetch_and_zero(&power_domains->init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
@@ -5719,7 +5781,8 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
* power wells if power domains must be deinitialized for suspend.
*/
if (!i915->params.disable_power_well)
- intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+ intel_display_power_put(i915, POWER_DOMAIN_INIT,
+ fetch_and_zero(&i915->power_domains.disable_wakeref));
intel_display_power_flush_work(i915);
intel_power_domains_verify_state(i915);
@@ -5754,8 +5817,8 @@ void intel_power_domains_resume(struct drm_i915_private *i915)
intel_power_domains_init_hw(i915, true);
power_domains->display_core_suspended = false;
} else {
- drm_WARN_ON(&i915->drm, power_domains->wakeref);
- power_domains->wakeref =
+ drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 4aa0a09cf14f..bc30c479be53 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -212,7 +212,8 @@ struct i915_power_domains {
bool display_core_suspended;
int power_well_count;
- intel_wakeref_t wakeref;
+ intel_wakeref_t init_wakeref;
+ intel_wakeref_t disable_wakeref;
struct mutex lock;
int domain_use_count[POWER_DOMAIN_NUM];
@@ -224,6 +225,13 @@ struct i915_power_domains {
struct i915_power_well *power_wells;
};
+struct intel_display_power_domain_set {
+ u64 mask;
+#ifdef CONFIG_DRM_I915_DEBUG_RUNTIME_PM
+ intel_wakeref_t wakerefs[POWER_DOMAIN_NUM];
+#endif
+};
+
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
for_each_if(BIT_ULL(domain) & (mask))
@@ -279,8 +287,6 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
void __intel_display_power_put_async(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref);
@@ -297,6 +303,9 @@ intel_display_power_put_async(struct drm_i915_private *i915,
__intel_display_power_put_async(i915, domain, wakeref);
}
#else
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+
static inline void
intel_display_power_put(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
@@ -314,6 +323,28 @@ intel_display_power_put_async(struct drm_i915_private *i915,
}
#endif
+void
+intel_display_power_get_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain);
+
+bool
+intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain);
+
+void
+intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ u64 mask);
+
+static inline void
+intel_display_power_put_all_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set)
+{
+ intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask);
+}
+
enum dbuf_slice {
DBUF_S1,
DBUF_S2,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index ce82d654d0f2..39397748b4b0 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -225,6 +225,17 @@ struct intel_encoder {
const struct drm_connector *audio_connector;
};
+struct intel_panel_bl_funcs {
+ /* Connector and platform specific backlight functions */
+ int (*setup)(struct intel_connector *connector, enum pipe pipe);
+ u32 (*get)(struct intel_connector *connector, enum pipe pipe);
+ void (*set)(const struct drm_connector_state *conn_state, u32 level);
+ void (*disable)(const struct drm_connector_state *conn_state, u32 level);
+ void (*enable)(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level);
+ u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
+};
+
struct intel_panel {
struct drm_display_mode *fixed_mode;
struct drm_display_mode *downclock_mode;
@@ -241,24 +252,28 @@ struct intel_panel {
bool alternate_pwm_increment; /* lpt+ */
/* PWM chip */
+ u32 pwm_level_min;
+ u32 pwm_level_max;
+ bool pwm_enabled;
bool util_pin_active_low; /* bxt+ */
u8 controller; /* bxt+ only */
struct pwm_device *pwm;
struct pwm_state pwm_state;
/* DPCD backlight */
- u8 pwmgen_bit_count;
+ union {
+ struct {
+ u8 pwmgen_bit_count;
+ } vesa;
+ struct {
+ bool sdr_uses_aux;
+ } intel;
+ } edp;
struct backlight_device *device;
- /* Connector and platform specific backlight functions */
- int (*setup)(struct intel_connector *connector, enum pipe pipe);
- u32 (*get)(struct intel_connector *connector);
- void (*set)(const struct drm_connector_state *conn_state, u32 level);
- void (*disable)(const struct drm_connector_state *conn_state);
- void (*enable)(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
- u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
+ const struct intel_panel_bl_funcs *funcs;
+ const struct intel_panel_bl_funcs *pwm_funcs;
void (*power)(struct intel_connector *, bool enable);
} backlight;
};
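
The hunk above replaces the per-panel backlight function pointers with two shared const vtables (funcs and pwm_funcs). A standalone sketch of that ops-table pattern, using hypothetical names rather than the driver's callbacks:

#include <stdio.h>

struct bl_funcs {
	void (*set)(unsigned int level);
	unsigned int (*get)(void);
};

static unsigned int current_level;

static void pwm_set(unsigned int level)
{
	current_level = level;
}

static unsigned int pwm_get(void)
{
	return current_level;
}

/* one const table shared by every panel that uses this backend */
static const struct bl_funcs pwm_bl_funcs = {
	.set = pwm_set,
	.get = pwm_get,
};

int main(void)
{
	const struct bl_funcs *funcs = &pwm_bl_funcs;	/* chosen once at setup time */

	funcs->set(128);
	printf("backlight level: %u\n", funcs->get());
	return 0;
}
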
@@ -339,6 +354,10 @@ struct intel_hdcp_shim {
enum transcoder cpu_transcoder,
bool enable);
+ /* Enable/Disable stream encryption on DP MST Transport Link */
+ int (*stream_encryption)(struct intel_connector *connector,
+ bool enable);
+
/* Ensures the link is still protected */
bool (*check_link)(struct intel_digital_port *dig_port,
struct intel_connector *connector);
@@ -370,8 +389,13 @@ struct intel_hdcp_shim {
int (*config_stream_type)(struct intel_digital_port *dig_port,
bool is_repeater, u8 type);
+ /* Enable/Disable HDCP 2.2 stream encryption on DP MST Transport Link */
+ int (*stream_2_2_encryption)(struct intel_connector *connector,
+ bool enable);
+
/* HDCP2.2 Link Integrity Check */
- int (*check_2_2_link)(struct intel_digital_port *dig_port);
+ int (*check_2_2_link)(struct intel_digital_port *dig_port,
+ struct intel_connector *connector);
};
struct intel_hdcp {
@@ -398,7 +422,6 @@ struct intel_hdcp {
* content can flow only through a link protected by HDCP2.2.
*/
u8 content_type;
- struct hdcp_port_data port_data;
bool is_paired;
bool is_repeater;
@@ -432,6 +455,8 @@ struct intel_hdcp {
* Hence caching the transcoder here.
*/
enum transcoder cpu_transcoder;
+ /* Only used for DP MST stream encryption */
+ enum transcoder stream_transcoder;
};
struct intel_connector {
@@ -531,7 +556,7 @@ struct intel_plane_state {
struct drm_framebuffer *fb;
u16 alpha;
- uint16_t pixel_blend_mode;
+ u16 pixel_blend_mode;
unsigned int rotation;
enum drm_color_encoding color_encoding;
enum drm_color_range color_range;
@@ -604,6 +629,11 @@ struct intel_plane_state {
u32 planar_slave;
struct drm_intel_sprite_colorkey ckey;
+
+ struct drm_rect psr2_sel_fetch_area;
+
+ /* Clear Color Value */
+ u64 ccval;
};
struct intel_initial_plane_config {
@@ -663,6 +693,8 @@ struct intel_crtc_scaler_state {
#define I915_MODE_FLAG_DSI_USE_TE1 (1<<4)
/* Flag to indicate mipi dsi periodic command mode where we do not get TE */
#define I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE (1<<5)
+/* Do tricks to make vblank timestamps sane with VRR? */
+#define I915_MODE_FLAG_VRR (1<<6)
struct intel_wm_level {
bool enable;
@@ -1047,7 +1079,10 @@ struct intel_crtc_state {
u32 cgm_mode;
};
- /* bitmask of visible planes (enum plane_id) */
+ /* bitmask of logically enabled planes (enum plane_id) */
+ u8 enabled_planes;
+
+ /* bitmask of actually visible planes (enum plane_id) */
u8 active_planes;
u8 nv12_planes;
u8 c8_planes;
@@ -1118,6 +1153,13 @@ struct intel_crtc_state {
struct intel_dsb *dsb;
u32 psr2_man_track_ctl;
+
+ /* Variable Refresh Rate state */
+ struct {
+ bool enable;
+ u8 pipeline_full;
+ u16 flipline, vmin, vmax;
+ } vrr;
};
enum intel_pipe_crc_source {
@@ -1160,7 +1202,9 @@ struct intel_crtc {
/* I915_MODE_FLAG_* */
u8 mode_flags;
- unsigned long long enabled_power_domains;
+ u16 vmax_vblank_start;
+
+ struct intel_display_power_domain_set enabled_power_domains;
struct intel_overlay *overlay;
struct intel_crtc_state *config;
@@ -1186,6 +1230,15 @@ struct intel_crtc {
ktime_t start_vbl_time;
int min_vbl, max_vbl;
int scanline_start;
+#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
+ struct {
+ u64 min;
+ u64 max;
+ u64 sum;
+ unsigned int over;
+ unsigned int times[17]; /* [1us, 16ms] */
+ } vbl;
+#endif
} debug;
/* scalers available on this crtc */
@@ -1203,6 +1256,7 @@ struct intel_plane {
enum pipe pipe;
bool has_fbc;
bool has_ccs;
+ bool need_async_flip_disable_wa;
u32 frontbuffer_bit;
struct {
@@ -1239,7 +1293,10 @@ struct intel_plane {
const struct intel_plane_state *plane_state);
void (*async_flip)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
+ const struct intel_plane_state *plane_state,
+ bool async_flip);
+ void (*enable_flip_done)(struct intel_plane *plane);
+ void (*disable_flip_done)(struct intel_plane *plane);
};
struct intel_watermark_params {
@@ -1321,6 +1378,43 @@ struct intel_dp_compliance {
u8 test_lane_count;
};
+struct intel_dp_pcon_frl {
+ bool is_trained;
+ int trained_rate_gbps;
+};
+
+struct intel_pps {
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct delayed_work panel_vdd_work;
+ bool want_panel_vdd;
+ unsigned long last_power_on;
+ unsigned long last_backlight_off;
+ ktime_t panel_power_off_time;
+ intel_wakeref_t vdd_wakeref;
+
+ /*
+ * Pipe whose power sequencer is currently locked into
+ * this port. Only relevant on VLV/CHV.
+ */
+ enum pipe pps_pipe;
+ /*
+ * Pipe currently driving the port. Used for preventing
+ * the use of the PPS for any pipe currently driving
+ * external DP as that will mess things up on VLV.
+ */
+ enum pipe active_pipe;
+ /*
+ * Set if the sequencer may be reset due to a power transition,
+ * requiring a reinitialization. Only relevant on BXT.
+ */
+ bool pps_reset;
+ struct edp_power_seq pps_delays;
+};
+
struct intel_dp {
i915_reg_t output_reg;
u32 DP;
@@ -1331,6 +1425,7 @@ struct intel_dp {
bool has_hdmi_sink;
bool has_audio;
bool reset_link_params;
+ bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
@@ -1339,6 +1434,7 @@ struct intel_dp {
u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE];
u8 lttpr_phy_caps[DP_MAX_LTTPR_COUNT][DP_LTTPR_PHY_CAP_SIZE];
u8 fec_capable;
+ u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE];
/* source rates */
int num_source_rates;
const int *source_rates;
@@ -1355,38 +1451,11 @@ struct intel_dp {
int max_link_rate;
/* sink or branch descriptor */
struct drm_dp_desc desc;
- u32 edid_quirks;
struct drm_dp_aux aux;
u32 aux_busy_last_status;
u8 train_set[4];
- int panel_power_up_delay;
- int panel_power_down_delay;
- int panel_power_cycle_delay;
- int backlight_on_delay;
- int backlight_off_delay;
- struct delayed_work panel_vdd_work;
- bool want_panel_vdd;
- unsigned long last_power_on;
- unsigned long last_backlight_off;
- ktime_t panel_power_off_time;
- /*
- * Pipe whose power sequencer is currently locked into
- * this port. Only relevant on VLV/CHV.
- */
- enum pipe pps_pipe;
- /*
- * Pipe currently driving the port. Used for preventing
- * the use of the PPS for any pipe currentrly driving
- * external DP as that will mess things up on VLV.
- */
- enum pipe active_pipe;
- /*
- * Set if the sequencer may be reset due to a power transition,
- * requiring a reinitialization. Only relevant on BXT.
- */
- bool pps_reset;
- struct edp_power_seq pps_delays;
+ struct intel_pps pps;
bool can_mst; /* this port supports mst */
bool is_mst;
@@ -1432,15 +1501,22 @@ struct intel_dp {
struct {
int min_tmds_clock, max_tmds_clock;
int max_dotclock;
+ int pcon_max_frl_bw;
u8 max_bpc;
bool ycbcr_444_to_420;
+ bool rgb_to_ycbcr;
} dfp;
+ /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+ struct pm_qos_request pm_qos;
+
/* Display stream compression testing */
bool force_dsc_en;
bool hobl_failed;
bool hobl_active;
+
+ struct intel_dp_pcon_frl frl;
};
enum lspcon_vendor {
@@ -1450,6 +1526,7 @@ enum lspcon_vendor {
struct intel_lspcon {
bool active;
+ bool hdr_supported;
enum drm_lspcon_mode mode;
enum lspcon_vendor vendor;
};
@@ -1466,6 +1543,8 @@ struct intel_digital_port {
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
+ intel_wakeref_t ddi_io_wakeref;
+ intel_wakeref_t aux_wakeref;
struct mutex tc_lock; /* protects the TypeC port mode */
intel_wakeref_t tc_lock_wakeref;
int tc_link_refcount;
@@ -1475,10 +1554,14 @@ struct intel_digital_port {
enum phy_fia tc_phy_fia;
u8 tc_phy_fia_idx;
- /* protects num_hdcp_streams reference count */
+ /* protects num_hdcp_streams reference count, hdcp_port_data and hdcp_auth_status */
struct mutex hdcp_mutex;
/* the number of pipes using HDCP signalling out of this port */
unsigned int num_hdcp_streams;
+ /* port HDCP auth status */
+ bool hdcp_auth_status;
+ /* HDCP port data that needs to be passed to the security f/w */
+ struct hdcp_port_data hdcp_port_data;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1755,6 +1838,12 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
(1 << INTEL_OUTPUT_EDP));
}
+static inline bool
+intel_crtc_needs_modeset(const struct intel_crtc_state *crtc_state)
+{
+ return drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
+}
+
static inline void
intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
@@ -1777,4 +1866,32 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
return i915_ggtt_offset(state->vma);
}
+static inline struct intel_frontbuffer *
+to_intel_frontbuffer(struct drm_framebuffer *fb)
+{
+ return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
+}
+
+static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->params.panel_use_ssc >= 0)
+ return dev_priv->params.panel_use_ssc != 0;
+ return dev_priv->vbt.lvds_use_ssc
+ && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
+}
+
+static inline u32 i9xx_dpll_compute_fp(struct dpll *dpll)
+{
+ return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
+}
+
+static inline u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *pipe_config)
+{
+ if (HAS_DDI(dev_priv))
+ return pipe_config->port_clock; /* SPLL */
+ else
+ return dev_priv->fdi_pll_freq;
+}
+
#endif /* __INTEL_DISPLAY_TYPES_H__ */
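
A standalone sketch of the bit packing performed by i9xx_dpll_compute_fp() above, with made-up divider values (n lands in bits 16 and up, m1 in bits 15:8, m2 in bits 7:0):

#include <stdio.h>

struct dpll {
	unsigned int n, m1, m2;
};

static unsigned int compute_fp(const struct dpll *d)
{
	return d->n << 16 | d->m1 << 8 | d->m2;
}

int main(void)
{
	struct dpll d = { .n = 3, .m1 = 16, .m2 = 8 };

	printf("FP = 0x%06x\n", compute_fp(&d));	/* prints FP = 0x031008 */
	return 0;
}
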
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 2165398d2c7c..8c12d5375607 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -41,13 +41,13 @@
#include "i915_debugfs.h"
#include "i915_drv.h"
-#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
@@ -58,10 +58,12 @@
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
+#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
+#include "intel_vrr.h"
#define DP_DPRX_ESI_LEN 14
@@ -121,6 +123,11 @@ static const struct dp_link_dpll chv_dpll[] = {
{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
+const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
+{
+ return IS_CHERRYVIEW(i915) ? &chv_dpll[0].dpll : &vlv_dpll[0].dpll;
+}
+
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
@@ -145,12 +152,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
static void intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);
-static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
-static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
- enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* update sink rates from dpcd */
@@ -162,8 +163,7 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
int i, max_rate;
int max_lttpr_rate;
- if (drm_dp_has_quirk(&intel_dp->desc, 0,
- DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
+ if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
static const int quirk_rates[] = { 162000, 270000, 324000 };
@@ -480,6 +480,13 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
return -1;
}
+ if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
+ drm_dbg_kms(&i915->drm,
+ "Retrying Link training for eDP with max parameters\n");
+ intel_dp->use_max_params = true;
+ return 0;
+ }
+
index = intel_dp_rate_index(intel_dp->common_rates,
intel_dp->num_common_rates,
link_rate);
@@ -651,6 +658,10 @@ intel_dp_output_format(struct drm_connector *connector,
!drm_mode_is_420_only(info, mode))
return INTEL_OUTPUT_FORMAT_RGB;
+ if (intel_dp->dfp.rgb_to_ycbcr &&
+ intel_dp->dfp.ycbcr_444_to_420)
+ return INTEL_OUTPUT_FORMAT_RGB;
+
if (intel_dp->dfp.ycbcr_444_to_420)
return INTEL_OUTPUT_FORMAT_YCBCR444;
else
@@ -716,6 +727,25 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
const struct drm_display_info *info = &connector->base.display_info;
int tmds_clock;
+ /* If PCON supports FRL MODE, check FRL bandwidth constraints */
+ if (intel_dp->dfp.pcon_max_frl_bw) {
+ int target_bw;
+ int max_frl_bw;
+ int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);
+
+ target_bw = bpp * target_clock;
+
+ max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
+
+ /* converting bw from Gbps to Kbps */
+ max_frl_bw = max_frl_bw * 1000000;
+
+ if (target_bw > max_frl_bw)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+ }
+
if (intel_dp->dfp.max_dotclock &&
target_clock > intel_dp->dfp.max_dotclock)
return MODE_CLOCK_HIGH;
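
A standalone worked example of the FRL bandwidth check added above: the required bandwidth is the minimum output bpp times the mode's pixel clock in kHz, compared against the PCON's maximum FRL rate converted from Gbps to kbps. The numbers are illustrative, not taken from a specific panel:

#include <stdio.h>

int main(void)
{
	int bpp = 24;			/* minimum output bpp for RGB */
	int target_clock = 594000;	/* 4k@60 pixel clock, kHz */
	int pcon_max_frl_bw = 24;	/* PCON reports 24 Gbps of FRL bandwidth */

	long long target_bw = (long long)bpp * target_clock;		/* kbps */
	long long max_frl_bw = (long long)pcon_max_frl_bw * 1000000;	/* Gbps -> kbps */

	printf("target %lld kbps vs FRL limit %lld kbps -> %s\n",
	       target_bw, max_frl_bw,
	       target_bw > max_frl_bw ? "MODE_CLOCK_HIGH" : "MODE_OK");
	return 0;
}
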
@@ -833,1125 +863,6 @@ intel_dp_mode_valid(struct drm_connector *connector,
return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}
-u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
-{
- int i;
- u32 v = 0;
-
- if (src_bytes > 4)
- src_bytes = 4;
- for (i = 0; i < src_bytes; i++)
- v |= ((u32)src[i]) << ((3 - i) * 8);
- return v;
-}
-
-static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
-{
- int i;
- if (dst_bytes > 4)
- dst_bytes = 4;
- for (i = 0; i < dst_bytes; i++)
- dst[i] = src >> ((3-i) * 8);
-}
-
-static void
-intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
-static void
-intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
- bool force_disable_vdd);
-static void
-intel_dp_pps_init(struct intel_dp *intel_dp);
-
-static intel_wakeref_t
-pps_lock(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- intel_wakeref_t wakeref;
-
- /*
- * See intel_power_sequencer_reset() why we need
- * a power domain reference here.
- */
- wakeref = intel_display_power_get(dev_priv,
- intel_aux_power_domain(dp_to_dig_port(intel_dp)));
-
- mutex_lock(&dev_priv->pps_mutex);
-
- return wakeref;
-}
-
-static intel_wakeref_t
-pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- mutex_unlock(&dev_priv->pps_mutex);
- intel_display_power_put(dev_priv,
- intel_aux_power_domain(dp_to_dig_port(intel_dp)),
- wakeref);
- return 0;
-}
-
-#define with_pps_lock(dp, wf) \
- for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
-
-static void
-vlv_power_sequencer_kick(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum pipe pipe = intel_dp->pps_pipe;
- bool pll_enabled, release_cl_override = false;
- enum dpio_phy phy = DPIO_PHY(pipe);
- enum dpio_channel ch = vlv_pipe_to_channel(pipe);
- u32 DP;
-
- if (drm_WARN(&dev_priv->drm,
- intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name))
- return;
-
- drm_dbg_kms(&dev_priv->drm,
- "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- /* Preserve the BIOS-computed detected bit. This is
- * supposed to be read-only.
- */
- DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
- DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
- DP |= DP_PORT_WIDTH(1);
- DP |= DP_LINK_TRAIN_PAT_1;
-
- if (IS_CHERRYVIEW(dev_priv))
- DP |= DP_PIPE_SEL_CHV(pipe);
- else
- DP |= DP_PIPE_SEL(pipe);
-
- pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
-
- /*
- * The DPLL for the pipe must be enabled for this to work.
- * So enable temporarily it if it's not already enabled.
- */
- if (!pll_enabled) {
- release_cl_override = IS_CHERRYVIEW(dev_priv) &&
- !chv_phy_powergate_ch(dev_priv, phy, ch, true);
-
- if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
- &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
- drm_err(&dev_priv->drm,
- "Failed to force on pll for pipe %c!\n",
- pipe_name(pipe));
- return;
- }
- }
-
- /*
- * Similar magic as in intel_dp_enable_port().
- * We _must_ do this port enable + disable trick
- * to make this power sequencer lock onto the port.
- * Otherwise even VDD force bit won't work.
- */
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- if (!pll_enabled) {
- vlv_force_pll_off(dev_priv, pipe);
-
- if (release_cl_override)
- chv_phy_powergate_ch(dev_priv, phy, ch, false);
- }
-}
-
-static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
- unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
-
- /*
- * We don't have power sequencer currently.
- * Pick one that's not used by other ports.
- */
- for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- if (encoder->type == INTEL_OUTPUT_EDP) {
- drm_WARN_ON(&dev_priv->drm,
- intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe !=
- intel_dp->pps_pipe);
-
- if (intel_dp->pps_pipe != INVALID_PIPE)
- pipes &= ~(1 << intel_dp->pps_pipe);
- } else {
- drm_WARN_ON(&dev_priv->drm,
- intel_dp->pps_pipe != INVALID_PIPE);
-
- if (intel_dp->active_pipe != INVALID_PIPE)
- pipes &= ~(1 << intel_dp->active_pipe);
- }
- }
-
- if (pipes == 0)
- return INVALID_PIPE;
-
- return ffs(pipes) - 1;
-}
-
-static enum pipe
-vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum pipe pipe;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* We should never land here with regular DP ports */
- drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe != intel_dp->pps_pipe);
-
- if (intel_dp->pps_pipe != INVALID_PIPE)
- return intel_dp->pps_pipe;
-
- pipe = vlv_find_free_pps(dev_priv);
-
- /*
- * Didn't find one. This should not happen since there
- * are two power sequencers and up to two eDP ports.
- */
- if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
- pipe = PIPE_A;
-
- vlv_steal_power_sequencer(dev_priv, pipe);
- intel_dp->pps_pipe = pipe;
-
- drm_dbg_kms(&dev_priv->drm,
- "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps_pipe),
- dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- /* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
-
- /*
- * Even vdd force doesn't work until we've made
- * the power sequencer lock in on the port.
- */
- vlv_power_sequencer_kick(intel_dp);
-
- return intel_dp->pps_pipe;
-}
-
-static int
-bxt_power_sequencer_idx(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int backlight_controller = dev_priv->vbt.backlight.controller;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* We should never land here with regular DP ports */
- drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
-
- if (!intel_dp->pps_reset)
- return backlight_controller;
-
- intel_dp->pps_reset = false;
-
- /*
- * Only the HW needs to be reprogrammed, the SW state is fixed and
- * has been setup during connector init.
- */
- intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
-
- return backlight_controller;
-}
-
-typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-
-static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
-}
-
-static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
-}
-
-static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- return true;
-}
-
-static enum pipe
-vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
- enum port port,
- vlv_pipe_check pipe_check)
-{
- enum pipe pipe;
-
- for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
- u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
- PANEL_PORT_SELECT_MASK;
-
- if (port_sel != PANEL_PORT_SELECT_VLV(port))
- continue;
-
- if (!pipe_check(dev_priv, pipe))
- continue;
-
- return pipe;
- }
-
- return INVALID_PIPE;
-}
-
-static void
-vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum port port = dig_port->base.port;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* try to find a pipe with this port selected */
- /* first pick one where the panel is on */
- intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_has_pp_on);
- /* didn't find one? pick one where vdd is on */
- if (intel_dp->pps_pipe == INVALID_PIPE)
- intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_has_vdd_on);
- /* didn't find one? pick one with just the correct port */
- if (intel_dp->pps_pipe == INVALID_PIPE)
- intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_any);
-
- /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
- if (intel_dp->pps_pipe == INVALID_PIPE) {
- drm_dbg_kms(&dev_priv->drm,
- "no initial power sequencer for [ENCODER:%d:%s]\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
- return;
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name,
- pipe_name(intel_dp->pps_pipe));
-
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
-}
-
-void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
-
- if (drm_WARN_ON(&dev_priv->drm,
- !(IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_GEN9_LP(dev_priv))))
- return;
-
- /*
- * We can't grab pps_mutex here due to deadlock with power_domain
- * mutex when power_domain functions are called while holding pps_mutex.
- * That also means that in order to use pps_pipe the code needs to
- * hold both a power domain reference and pps_mutex, and the power domain
- * reference get/put must be done while _not_ holding pps_mutex.
- * pps_{lock,unlock}() do these steps in the correct order, so one
- * should use them always.
- */
-
- for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- drm_WARN_ON(&dev_priv->drm,
- intel_dp->active_pipe != INVALID_PIPE);
-
- if (encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- if (IS_GEN9_LP(dev_priv))
- intel_dp->pps_reset = true;
- else
- intel_dp->pps_pipe = INVALID_PIPE;
- }
-}
-
-struct pps_registers {
- i915_reg_t pp_ctrl;
- i915_reg_t pp_stat;
- i915_reg_t pp_on;
- i915_reg_t pp_off;
- i915_reg_t pp_div;
-};
-
-static void intel_pps_get_registers(struct intel_dp *intel_dp,
- struct pps_registers *regs)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int pps_idx = 0;
-
- memset(regs, 0, sizeof(*regs));
-
- if (IS_GEN9_LP(dev_priv))
- pps_idx = bxt_power_sequencer_idx(intel_dp);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pps_idx = vlv_power_sequencer_pipe(intel_dp);
-
- regs->pp_ctrl = PP_CONTROL(pps_idx);
- regs->pp_stat = PP_STATUS(pps_idx);
- regs->pp_on = PP_ON_DELAYS(pps_idx);
- regs->pp_off = PP_OFF_DELAYS(pps_idx);
-
- /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
- if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- regs->pp_div = INVALID_MMIO_REG;
- else
- regs->pp_div = PP_DIVISOR(pps_idx);
-}
-
-static i915_reg_t
-_pp_ctrl_reg(struct intel_dp *intel_dp)
-{
- struct pps_registers regs;
-
- intel_pps_get_registers(intel_dp, &regs);
-
- return regs.pp_ctrl;
-}
-
-static i915_reg_t
-_pp_stat_reg(struct intel_dp *intel_dp)
-{
- struct pps_registers regs;
-
- intel_pps_get_registers(intel_dp, &regs);
-
- return regs.pp_stat;
-}
-
-static bool edp_have_panel_power(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_dp->pps_pipe == INVALID_PIPE)
- return false;
-
- return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
-}
-
-static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_dp->pps_pipe == INVALID_PIPE)
- return false;
-
- return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
-}
-
-static void
-intel_dp_check_edp(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
- drm_WARN(&dev_priv->drm, 1,
- "eDP powered off while attempting aux channel communication.\n");
- drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
- intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
- intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
- }
-}
-
-static u32
-intel_dp_aux_wait_done(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
- const unsigned int timeout_ms = 10;
- u32 status;
- bool done;
-
-#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- done = wait_event_timeout(i915->gmbus_wait_queue, C,
- msecs_to_jiffies_timeout(timeout_ms));
-
- /* just trace the final value */
- trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
-
- if (!done)
- drm_err(&i915->drm,
- "%s: did not complete or timeout within %ums (status 0x%08x)\n",
- intel_dp->aux.name, timeout_ms, status);
-#undef C
-
- return status;
-}
-
-static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (index)
- return 0;
-
- /*
- * The clock divider is based off the hrawclk, and would like to run at
- * 2MHz. So, take the hrawclk value and divide by 2000 and use that
- */
- return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
-}
-
-static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- u32 freq;
-
- if (index)
- return 0;
-
- /*
- * The clock divider is based off the cdclk or PCH rawclk, and would
- * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
- * divide by 2000 and use that
- */
- if (dig_port->aux_ch == AUX_CH_A)
- freq = dev_priv->cdclk.hw.cdclk;
- else
- freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
- return DIV_ROUND_CLOSEST(freq, 2000);
-}
-
-static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-
- if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
- /* Workaround for non-ULT HSW */
- switch (index) {
- case 0: return 63;
- case 1: return 72;
- default: return 0;
- }
- }
-
- return ilk_get_aux_clock_divider(intel_dp, index);
-}
-
-static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- /*
- * SKL doesn't need us to program the AUX clock divider (Hardware will
- * derive the clock from CDCLK automatically). We still implement the
- * get_aux_clock_divider vfunc to plug-in into the existing code.
- */
- return index ? 0 : 1;
-}
-
-static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
- int send_bytes,
- u32 aux_clock_divider)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv =
- to_i915(dig_port->base.base.dev);
- u32 precharge, timeout;
-
- if (IS_GEN(dev_priv, 6))
- precharge = 3;
- else
- precharge = 5;
-
- if (IS_BROADWELL(dev_priv))
- timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
- else
- timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
-
- return DP_AUX_CH_CTL_SEND_BUSY |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_INTERRUPT |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- timeout |
- DP_AUX_CH_CTL_RECEIVE_ERROR |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
-}
-
-static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
- int send_bytes,
- u32 unused)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 =
- to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- u32 ret;
-
- ret = DP_AUX_CH_CTL_SEND_BUSY |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_INTERRUPT |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_TIME_OUT_MAX |
- DP_AUX_CH_CTL_RECEIVE_ERROR |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
- DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
-
- if (intel_phy_is_tc(i915, phy) &&
- dig_port->tc_mode == TC_PORT_TBT_ALT)
- ret |= DP_AUX_CH_CTL_TBT_IO;
-
- return ret;
-}
-
-static int
-intel_dp_aux_xfer(struct intel_dp *intel_dp,
- const u8 *send, int send_bytes,
- u8 *recv, int recv_size,
- u32 aux_send_ctl_flags)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 =
- to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- bool is_tc_port = intel_phy_is_tc(i915, phy);
- i915_reg_t ch_ctl, ch_data[5];
- u32 aux_clock_divider;
- enum intel_display_power_domain aux_domain;
- intel_wakeref_t aux_wakeref;
- intel_wakeref_t pps_wakeref;
- int i, ret, recv_bytes;
- int try, clock = 0;
- u32 status;
- bool vdd;
-
- ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
- for (i = 0; i < ARRAY_SIZE(ch_data); i++)
- ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
-
- if (is_tc_port)
- intel_tc_port_lock(dig_port);
-
- aux_domain = intel_aux_power_domain(dig_port);
-
- aux_wakeref = intel_display_power_get(i915, aux_domain);
- pps_wakeref = pps_lock(intel_dp);
-
- /*
- * We will be called with VDD already enabled for dpcd/edid/oui reads.
- * In such cases we want to leave VDD enabled and it's up to upper layers
- * to turn it off. But for eg. i2c-dev access we need to turn it on/off
- * ourselves.
- */
- vdd = edp_panel_vdd_on(intel_dp);
-
- /* dp aux is extremely sensitive to irq latency, hence request the
- * lowest possible wakeup latency and so prevent the cpu from going into
- * deep sleep states.
- */
- cpu_latency_qos_update_request(&i915->pm_qos, 0);
-
- intel_dp_check_edp(intel_dp);
-
- /* Try to wait for any previous AUX channel activity */
- for (try = 0; try < 3; try++) {
- status = intel_uncore_read_notrace(uncore, ch_ctl);
- if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- break;
- msleep(1);
- }
- /* just trace the final value */
- trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
-
- if (try == 3) {
- const u32 status = intel_uncore_read(uncore, ch_ctl);
-
- if (status != intel_dp->aux_busy_last_status) {
- drm_WARN(&i915->drm, 1,
- "%s: not started (status 0x%08x)\n",
- intel_dp->aux.name, status);
- intel_dp->aux_busy_last_status = status;
- }
-
- ret = -EBUSY;
- goto out;
- }
-
- /* Only 5 data registers! */
- if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
- ret = -E2BIG;
- goto out;
- }
-
- while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
- u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
- send_bytes,
- aux_clock_divider);
-
- send_ctl |= aux_send_ctl_flags;
-
- /* Must try at least 3 times according to DP spec */
- for (try = 0; try < 5; try++) {
- /* Load the send data into the aux channel data registers */
- for (i = 0; i < send_bytes; i += 4)
- intel_uncore_write(uncore,
- ch_data[i >> 2],
- intel_dp_pack_aux(send + i,
- send_bytes - i));
-
- /* Send the command and wait for it to complete */
- intel_uncore_write(uncore, ch_ctl, send_ctl);
-
- status = intel_dp_aux_wait_done(intel_dp);
-
- /* Clear done status and any errors */
- intel_uncore_write(uncore,
- ch_ctl,
- status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
-
- /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
- * 400us delay required for errors and timeouts
- * Timeout errors from the HW already meet this
- * requirement so skip to next iteration
- */
- if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
- continue;
-
- if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
- usleep_range(400, 500);
- continue;
- }
- if (status & DP_AUX_CH_CTL_DONE)
- goto done;
- }
- }
-
- if ((status & DP_AUX_CH_CTL_DONE) == 0) {
- drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
- intel_dp->aux.name, status);
- ret = -EBUSY;
- goto out;
- }
-
-done:
- /* Check for timeout or receive error.
- * Timeouts occur when the sink is not connected
- */
- if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
- drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
- intel_dp->aux.name, status);
- ret = -EIO;
- goto out;
- }
-
- /* Timeouts occur when the device isn't connected, so they're
- * "normal" -- don't fill the kernel log with these */
- if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
- drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
- intel_dp->aux.name, status);
- ret = -ETIMEDOUT;
- goto out;
- }
-
- /* Unload any bytes sent back from the other side */
- recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
- DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
-
- /*
- * By BSpec: "Message sizes of 0 or >20 are not allowed."
- * We have no idea of what happened so we return -EBUSY so
- * drm layer takes care for the necessary retries.
- */
- if (recv_bytes == 0 || recv_bytes > 20) {
- drm_dbg_kms(&i915->drm,
- "%s: Forbidden recv_bytes = %d on aux transaction\n",
- intel_dp->aux.name, recv_bytes);
- ret = -EBUSY;
- goto out;
- }
-
- if (recv_bytes > recv_size)
- recv_bytes = recv_size;
-
- for (i = 0; i < recv_bytes; i += 4)
- intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
- recv + i, recv_bytes - i);
-
- ret = recv_bytes;
-out:
- cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
-
- if (vdd)
- edp_panel_vdd_off(intel_dp, false);
-
- pps_unlock(intel_dp, pps_wakeref);
- intel_display_power_put_async(i915, aux_domain, aux_wakeref);
-
- if (is_tc_port)
- intel_tc_port_unlock(dig_port);
-
- return ret;
-}
-
-#define BARE_ADDRESS_SIZE 3
-#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
-
-static void
-intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
- const struct drm_dp_aux_msg *msg)
-{
- txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
- txbuf[1] = (msg->address >> 8) & 0xff;
- txbuf[2] = msg->address & 0xff;
- txbuf[3] = msg->size - 1;
-}
-
-static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
-{
- /*
- * If we're trying to send the HDCP Aksv, we need to set a the Aksv
- * select bit to inform the hardware to send the Aksv after our header
- * since we can't access that data from software.
- */
- if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
- msg->address == DP_AUX_HDCP_AKSV)
- return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
-
- return 0;
-}
-
-static ssize_t
-intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
-{
- struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- u8 txbuf[20], rxbuf[20];
- size_t txsize, rxsize;
- u32 flags = intel_dp_aux_xfer_flags(msg);
- int ret;
-
- intel_dp_aux_header(txbuf, msg);
-
- switch (msg->request & ~DP_AUX_I2C_MOT) {
- case DP_AUX_NATIVE_WRITE:
- case DP_AUX_I2C_WRITE:
- case DP_AUX_I2C_WRITE_STATUS_UPDATE:
- txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
- rxsize = 2; /* 0 or 1 data bytes */
-
- if (drm_WARN_ON(&i915->drm, txsize > 20))
- return -E2BIG;
-
- drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
-
- if (msg->buffer)
- memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
-
- ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
- rxbuf, rxsize, flags);
- if (ret > 0) {
- msg->reply = rxbuf[0] >> 4;
-
- if (ret > 1) {
- /* Number of bytes written in a short write. */
- ret = clamp_t(int, rxbuf[1], 0, msg->size);
- } else {
- /* Return payload size. */
- ret = msg->size;
- }
- }
- break;
-
- case DP_AUX_NATIVE_READ:
- case DP_AUX_I2C_READ:
- txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
- rxsize = msg->size + 1;
-
- if (drm_WARN_ON(&i915->drm, rxsize > 20))
- return -E2BIG;
-
- ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
- rxbuf, rxsize, flags);
- if (ret > 0) {
- msg->reply = rxbuf[0] >> 4;
- /*
- * Assume happy day, and copy the data. The caller is
- * expected to check msg->reply before touching it.
- *
- * Return payload size.
- */
- ret--;
- memcpy(msg->buffer, rxbuf + 1, ret);
- }
- break;
-
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-
-static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_B);
- }
-}
-
-static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_B, index);
- }
-}
-
-static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- return DP_AUX_CH_CTL(aux_ch);
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return PCH_DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_A);
- }
-}
-
-static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- return DP_AUX_CH_DATA(aux_ch, index);
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return PCH_DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_A, index);
- }
-}
-
-static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- case AUX_CH_E:
- case AUX_CH_F:
- return DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_A);
- }
-}
-
-static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- case AUX_CH_E:
- case AUX_CH_F:
- return DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_A, index);
- }
-}
-
-static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_USBC1:
- case AUX_CH_USBC2:
- case AUX_CH_USBC3:
- case AUX_CH_USBC4:
- case AUX_CH_USBC5:
- case AUX_CH_USBC6:
- return DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_A);
- }
-}
-
-static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_USBC1:
- case AUX_CH_USBC2:
- case AUX_CH_USBC3:
- case AUX_CH_USBC4:
- case AUX_CH_USBC5:
- case AUX_CH_USBC6:
- return DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_A, index);
- }
-}
-
-static void
-intel_dp_aux_fini(struct intel_dp *intel_dp)
-{
- kfree(intel_dp->aux.name);
-}
-
-static void
-intel_dp_aux_init(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &dig_port->base;
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- if (INTEL_GEN(dev_priv) >= 12) {
- intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
- } else if (INTEL_GEN(dev_priv) >= 9) {
- intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = skl_aux_data_reg;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
- } else {
- intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
- }
-
- if (INTEL_GEN(dev_priv) >= 9)
- intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(dev_priv))
- intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
- else
- intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
-
- if (INTEL_GEN(dev_priv) >= 9)
- intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
- else
- intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
-
- drm_dp_aux_init(&intel_dp->aux);
-
- /* Failure to allocate our preferred name is not critical */
- if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
- intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
- aux_ch - AUX_CH_USBC1 + '1',
- encoder->base.name);
- else
- intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
- aux_ch_name(aux_ch),
- encoder->base.name);
-
- intel_dp->aux.transfer = intel_dp_aux_transfer;
-}
-
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
@@ -2263,6 +1174,44 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
+/* Optimize link config in order: max bpp, min lanes, min clock */
+static int
+intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ const struct link_config_limits *limits)
+{
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ int bpp, clock, lane_count;
+ int mode_rate, link_clock, link_avail;
+
+ for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
+
+ mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+ output_bpp);
+
+ for (lane_count = limits->min_lane_count;
+ lane_count <= limits->max_lane_count;
+ lane_count <<= 1) {
+ for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
+ link_clock = intel_dp->common_rates[clock];
+ link_avail = intel_dp_max_data_rate(link_clock,
+ lane_count);
+
+ if (mode_rate <= link_avail) {
+ pipe_config->lane_count = lane_count;
+ pipe_config->pipe_bpp = bpp;
+ pipe_config->port_clock = link_clock;
+
+ return 0;
+ }
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
int i, num_bpc;
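
A standalone worked example of the search order used by intel_dp_compute_link_config_fast() above: highest bpp first, then the fewest lanes, then the lowest link rate that still carries the mode. The bandwidth helpers here are simplified stand-ins (both sides expressed in kB/s), not the driver's functions:

#include <stdio.h>

static long long link_required(int pixel_clock_khz, int bpp)
{
	return ((long long)pixel_clock_khz * bpp + 7) / 8;	/* kB/s */
}

static long long link_available(int link_clock_khz, int lanes)
{
	return (long long)link_clock_khz * lanes;		/* kB/s */
}

int main(void)
{
	static const int rates[] = { 162000, 270000 };	/* panel's advertised link rates, kHz */
	const int pixel_clock = 141000;			/* roughly a 1920x1080@60 eDP mode, kHz */
	const int bpp = 24;
	long long need = link_required(pixel_clock, bpp);

	for (int lanes = 1; lanes <= 4; lanes <<= 1) {
		for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
			if (need <= link_available(rates[i], lanes)) {
				printf("picked %d lane(s) @ %d kHz (need %lld kB/s)\n",
				       lanes, rates[i], need);
				return 0;
			}
		}
	}

	printf("no configuration fits\n");
	return 1;
}
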
@@ -2289,6 +1238,14 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
u8 line_buf_depth;
int ret;
+ /*
+ * RC_MODEL_SIZE is currently a constant across all configurations.
+ *
+ * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
+ * DP_DSC_RC_BUF_SIZE for this.
+ */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+
ret = intel_dsc_compute_params(encoder, crtc_state);
if (ret)
return ret;
@@ -2478,13 +1435,14 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);
- if (intel_dp_is_edp(intel_dp)) {
+ if (intel_dp->use_max_params) {
/*
* Use the maximum clock and number of lanes the eDP panel
- * advertizes being capable of. The panels are generally
+ * advertises being capable of in case the initial fast
+ * optimal params failed us. The panels are generally
* designed to support only a single clock and lane
- * configuration, and typically these values correspond to the
- * native resolution of the panel.
+ * configuration, and typically on older panels these
+ * values correspond to the native resolution of the panel.
*/
limits.min_lane_count = limits.max_lane_count;
limits.min_clock = limits.max_clock;
@@ -2503,11 +1461,22 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp_can_bigjoiner(intel_dp))
pipe_config->bigjoiner = true;
- /*
- * Optimize for slow and wide. This is the place to add alternative
- * optimization policy.
- */
- ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
+ if (intel_dp_is_edp(intel_dp))
+ /*
+ * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
+ * section A.1: "It is recommended that the minimum number of
+ * lanes be used, using the minimum link rate allowed for that
+ * lane configuration."
+ *
+ * Note that we fall back to the max clock and lane count for eDP
+ * panels that fail with the fast optimal settings (see
+ * intel_dp->use_max_params), in which case the fast vs. wide
+ * choice doesn't matter.
+ */
+ ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
+ else
+ /* Optimize for slow and wide. */
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
/* enable compression if the mode doesn't fit available BW */
drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
@@ -2756,6 +1725,9 @@ intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ if (pipe_config->vrr.enable)
+ return;
+
/*
* DRRS and PSR can't be enable together, so giving preference to PSR
* as it allows more power-savings by complete shutting down display,
@@ -2788,8 +1760,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
- DP_DPCD_QUIRK_CONSTANT_N);
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
int ret = 0, output_bpp;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
@@ -2859,6 +1830,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config);
+ intel_vrr_compute_config(pipe_config, conn_state);
intel_psr_compute_config(intel_dp, pipe_config);
intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
constant_n);
@@ -2959,427 +1931,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
}
}
-#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
-#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
-
-#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
-#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
-
-#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
-#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
-
-static void intel_pps_verify_state(struct intel_dp *intel_dp);
-
-static void wait_panel_status(struct intel_dp *intel_dp,
- u32 mask,
- u32 value)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- i915_reg_t pp_stat_reg, pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- intel_pps_verify_state(intel_dp);
-
- pp_stat_reg = _pp_stat_reg(intel_dp);
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
- drm_dbg_kms(&dev_priv->drm,
- "mask %08x value %08x status %08x control %08x\n",
- mask, value,
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
-
- if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
- mask, value, 5000))
- drm_err(&dev_priv->drm,
- "Panel status timeout: status %08x control %08x\n",
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
-
- drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
-}
-
-static void wait_panel_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-
- drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
- wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
-}
-
-static void wait_panel_off(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-
- drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
- wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
-}
-
-static void wait_panel_power_cycle(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- ktime_t panel_power_on_time;
- s64 panel_power_off_duration;
-
- drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
-
- /* take the difference of currrent time and panel power off time
- * and then make panel wait for t11_t12 if needed. */
- panel_power_on_time = ktime_get_boottime();
- panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
-
- /* When we disable the VDD override bit last we have to do the manual
- * wait. */
- if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
- wait_remaining_ms_from_jiffies(jiffies,
- intel_dp->panel_power_cycle_delay - panel_power_off_duration);
-
- wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
-}
-
-static void wait_backlight_on(struct intel_dp *intel_dp)
-{
- wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
- intel_dp->backlight_on_delay);
-}
-
-static void edp_wait_backlight_off(struct intel_dp *intel_dp)
-{
- wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
- intel_dp->backlight_off_delay);
-}
-
-/* Read the current pp_control value, unlocking the register if it
- * is locked
- */
-
-static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 control;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
- if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
- (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
- control &= ~PANEL_UNLOCK_MASK;
- control |= PANEL_UNLOCK_REGS;
- }
- return control;
-}
-
-/*
- * Must be paired with edp_panel_vdd_off().
- * Must hold pps_mutex around the whole on/off sequence.
- * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
- */
-static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- u32 pp;
- i915_reg_t pp_stat_reg, pp_ctrl_reg;
- bool need_to_disable = !intel_dp->want_panel_vdd;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return false;
-
- cancel_delayed_work(&intel_dp->panel_vdd_work);
- intel_dp->want_panel_vdd = true;
-
- if (edp_have_panel_vdd(intel_dp))
- return need_to_disable;
-
- intel_display_power_get(dev_priv,
- intel_aux_power_domain(dig_port));
-
- drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- if (!edp_have_panel_power(intel_dp))
- wait_panel_power_cycle(intel_dp);
-
- pp = ilk_get_pp_control(intel_dp);
- pp |= EDP_FORCE_VDD;
-
- pp_stat_reg = _pp_stat_reg(intel_dp);
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
- /*
- * If the panel wasn't on, delay before accessing aux channel
- */
- if (!edp_have_panel_power(intel_dp)) {
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] panel power wasn't enabled\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
- msleep(intel_dp->panel_power_up_delay);
- }
-
- return need_to_disable;
-}
-
-/*
- * Must be paired with intel_edp_panel_vdd_off() or
- * intel_edp_panel_off().
- * Nested calls to these functions are not allowed since
- * we drop the lock. Caller must use some higher level
- * locking to prevent nested calls from other threads.
- */
-void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
- bool vdd;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- vdd = false;
- with_pps_lock(intel_dp, wakeref)
- vdd = edp_panel_vdd_on(intel_dp);
- I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
-}
-
-static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port =
- dp_to_dig_port(intel_dp);
- u32 pp;
- i915_reg_t pp_stat_reg, pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);
-
- if (!edp_have_panel_vdd(intel_dp))
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- pp = ilk_get_pp_control(intel_dp);
- pp &= ~EDP_FORCE_VDD;
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- pp_stat_reg = _pp_stat_reg(intel_dp);
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
-
- /* Make sure sequencer is idle before allowing subsequent activity */
- drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
-
- if ((pp & PANEL_POWER_ON) == 0)
- intel_dp->panel_power_off_time = ktime_get_boottime();
-
- intel_display_power_put_unchecked(dev_priv,
- intel_aux_power_domain(dig_port));
-}
-
-static void edp_panel_vdd_work(struct work_struct *__work)
-{
- struct intel_dp *intel_dp =
- container_of(to_delayed_work(__work),
- struct intel_dp, panel_vdd_work);
- intel_wakeref_t wakeref;
-
- with_pps_lock(intel_dp, wakeref) {
- if (!intel_dp->want_panel_vdd)
- edp_panel_vdd_off_sync(intel_dp);
- }
-}
-
-static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
-{
- unsigned long delay;
-
- /*
- * Queue the timer to fire a long time from now (relative to the power
- * down delay) to keep the panel power up across a sequence of
- * operations.
- */
- delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
- schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
-}
-
-/*
- * Must be paired with edp_panel_vdd_on().
- * Must hold pps_mutex around the whole on/off sequence.
- * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
- */
-static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
-
- intel_dp->want_panel_vdd = false;
-
- if (sync)
- edp_panel_vdd_off_sync(intel_dp);
- else
- edp_panel_vdd_schedule_off(intel_dp);
-}
-
-static void edp_panel_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp;
- i915_reg_t pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
-
- if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
- "[ENCODER:%d:%s] panel power already on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name))
- return;
-
- wait_panel_power_cycle(intel_dp);
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- pp = ilk_get_pp_control(intel_dp);
- if (IS_GEN(dev_priv, 5)) {
- /* ILK workaround: disable reset around power sequence */
- pp &= ~PANEL_POWER_RESET;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-
- pp |= PANEL_POWER_ON;
- if (!IS_GEN(dev_priv, 5))
- pp |= PANEL_POWER_RESET;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
-
- wait_panel_on(intel_dp);
- intel_dp->last_power_on = jiffies;
-
- if (IS_GEN(dev_priv, 5)) {
- pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-}
-
-void intel_edp_panel_on(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref)
- edp_panel_on(intel_dp);
-}
-
-
-static void edp_panel_off(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- u32 pp;
- i915_reg_t pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
-
- drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
- "Need [ENCODER:%d:%s] VDD to turn off panel\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
-
- pp = ilk_get_pp_control(intel_dp);
- /* We need to switch off panel power _and_ force vdd, for otherwise some
- * panels get very unhappy and cease to work. */
- pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
- EDP_BLC_ENABLE);
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
- intel_dp->want_panel_vdd = false;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
-
- wait_panel_off(intel_dp);
- intel_dp->panel_power_off_time = ktime_get_boottime();
-
- /* We got a reference when we enabled the VDD. */
- intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
-}
-
-void intel_edp_panel_off(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref)
- edp_panel_off(intel_dp);
-}
-
-/* Enable backlight in the panel power control. */
-static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- intel_wakeref_t wakeref;
-
- /*
- * If we enable the backlight right away following a panel power
- * on, we may see slight flicker as the panel syncs with the eDP
- * link. So delay a bit to make sure the image is solid before
- * allowing it to appear.
- */
- wait_backlight_on(intel_dp);
-
- with_pps_lock(intel_dp, wakeref) {
- i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- u32 pp;
-
- pp = ilk_get_pp_control(intel_dp);
- pp |= EDP_BLC_ENABLE;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-}
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
@@ -3394,31 +1945,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
drm_dbg_kms(&i915->drm, "\n");
intel_panel_enable_backlight(crtc_state, conn_state);
- _intel_edp_backlight_on(intel_dp);
-}
-
-/* Disable backlight in the panel power control. */
-static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- intel_wakeref_t wakeref;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref) {
- i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- u32 pp;
-
- pp = ilk_get_pp_control(intel_dp);
- pp &= ~EDP_BLC_ENABLE;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-
- intel_dp->last_backlight_off = jiffies;
- edp_wait_backlight_off(intel_dp);
+ intel_pps_backlight_on(intel_dp);
}
/* Disable backlight PP control and backlight PWM. */
@@ -3432,37 +1959,10 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
drm_dbg_kms(&i915->drm, "\n");
- _intel_edp_backlight_off(intel_dp);
+ intel_pps_backlight_off(intel_dp);
intel_panel_disable_backlight(old_conn_state);
}
-/*
- * Hook for controlling the panel power control backlight through the bl_power
- * sysfs attribute. Take care to handle multiple calls.
- */
-static void intel_edp_backlight_power(struct intel_connector *connector,
- bool enable)
-{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- intel_wakeref_t wakeref;
- bool is_enabled;
-
- is_enabled = false;
- with_pps_lock(intel_dp, wakeref)
- is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
- if (is_enabled == enable)
- return;
-
- drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
- enable ? "enable" : "disable");
-
- if (enable)
- _intel_edp_backlight_on(intel_dp);
- else
- _intel_edp_backlight_off(intel_dp);
-}
-
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -3579,6 +2079,29 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
enable ? "enable" : "disable");
}
+static void
+intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 oui[] = { 0x00, 0xaa, 0x01 };
+ u8 buf[3] = { 0 };
+
+ /*
+ * During driver init, we want to be careful and avoid changing the source OUI if it's
+ * already set to what we want, so as to avoid clearing any state by accident
+ */
+ if (careful) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
+ drm_err(&i915->drm, "Failed to read source OUI\n");
+
+ if (memcmp(oui, buf, sizeof(oui)) == 0)
+ return;
+ }
+
+ if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
+ drm_err(&i915->drm, "Failed to write source OUI\n");
+}
+
/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
@@ -3600,6 +2123,10 @@ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
lspcon_resume(dp_to_dig_port(intel_dp));
+ /* Write the source OUI as early as possible */
+ if (intel_dp_is_edp(intel_dp))
+ intel_edp_init_source_oui(intel_dp, false);
+
/*
* When turning on, we need to retry for 1ms to give the sink
* time to wake up.
@@ -3856,10 +2383,12 @@ static void intel_disable_dp(struct intel_atomic_state *state,
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
- intel_edp_panel_vdd_on(intel_dp);
+ intel_pps_vdd_on(intel_dp);
intel_edp_backlight_off(old_conn_state);
intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
- intel_edp_panel_off(intel_dp);
+ intel_pps_off(intel_dp);
+ intel_dp->frl.is_trained = false;
+ intel_dp->frl.trained_rate_gbps = 0;
}
static void g4x_disable_dp(struct intel_atomic_state *state,
@@ -3955,6 +2484,280 @@ cpt_set_link_train(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
+static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ /* Clear the cached register set to avoid using stale values */
+
+ memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
+ intel_dp->pcon_dsc_dpcd,
+ sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
+ drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ DP_PCON_DSC_ENCODER);
+
+ drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
+ (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
+}
+
+static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
+{
+ int bw_gbps[] = {9, 18, 24, 32, 40, 48};
+ int i;
+
+ for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
+ if (frl_bw_mask & (1 << i))
+ return bw_gbps[i];
+ }
+ return 0;
+}
+
+static int intel_dp_pcon_set_frl_mask(int max_frl)
+{
+ switch (max_frl) {
+ case 48:
+ return DP_PCON_FRL_BW_MASK_48GBPS;
+ case 40:
+ return DP_PCON_FRL_BW_MASK_40GBPS;
+ case 32:
+ return DP_PCON_FRL_BW_MASK_32GBPS;
+ case 24:
+ return DP_PCON_FRL_BW_MASK_24GBPS;
+ case 18:
+ return DP_PCON_FRL_BW_MASK_18GBPS;
+ case 9:
+ return DP_PCON_FRL_BW_MASK_9GBPS;
+ }
+
+ return 0;
+}
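
The two helpers above translate between a link rate in Gbps and the DPCD FRL bandwidth bitmask, with bit 0 standing for 9 Gbps and bit 5 for 48 Gbps. A minimal standalone sketch of the highest-set-bit lookup follows; the bit-to-Gbps table is assumed to mirror bw_gbps[] above and the test value is made up.

    /* Illustrative userspace sketch of the FRL mask -> Gbps lookup. */
    #include <stdio.h>

    static int frl_mask_to_gbps(unsigned int frl_bw_mask)
    {
        static const int bw_gbps[] = { 9, 18, 24, 32, 40, 48 };
        int i;

        /* Report the highest bandwidth bit that is set in the mask */
        for (i = 5; i >= 0; i--)
            if (frl_bw_mask & (1u << i))
                return bw_gbps[i];
        return 0;
    }

    int main(void)
    {
        /* Bits 1 and 2 set (18 and 24 Gbps) -> the helper reports 24 */
        printf("%d Gbps\n", frl_mask_to_gbps(0x06));
        return 0;
    }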
+
+static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+ int max_frl_rate;
+ int max_lanes, rate_per_lane;
+ int max_dsc_lanes, dsc_rate_per_lane;
+
+ max_lanes = connector->display_info.hdmi.max_lanes;
+ rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
+ max_frl_rate = max_lanes * rate_per_lane;
+
+ if (connector->display_info.hdmi.dsc_cap.v_1p2) {
+ max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
+ dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
+ if (max_dsc_lanes && dsc_rate_per_lane)
+ max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
+ }
+
+ return max_frl_rate;
+}
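
intel_dp_hdmi_sink_max_frl() clamps the uncompressed FRL bandwidth advertised in the EDID (lanes times rate per lane) by the sink's DSC capability when one is present. A rough worked example with made-up numbers: 4 lanes at 12 Gbps gives 48 Gbps, but a DSC block limited to 4 lanes at 10 Gbps pulls the usable maximum down to 40 Gbps.

    /* Standalone sketch of the same clamping; all values are illustrative. */
    #include <stdio.h>

    static int max_frl_rate_gbps(int max_lanes, int rate_per_lane,
                                 int dsc_max_lanes, int dsc_rate_per_lane)
    {
        int rate = max_lanes * rate_per_lane;

        /* The DSC path can only restrict the rate, never raise it */
        if (dsc_max_lanes && dsc_rate_per_lane &&
            dsc_max_lanes * dsc_rate_per_lane < rate)
            rate = dsc_max_lanes * dsc_rate_per_lane;

        return rate;
    }

    int main(void)
    {
        printf("%d Gbps\n", max_frl_rate_gbps(4, 12, 4, 10)); /* prints 40 */
        return 0;
    }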
+
+static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
+{
+#define PCON_EXTENDED_TRAIN_MODE (1 > 0)
+#define PCON_CONCURRENT_MODE (1 > 0)
+#define PCON_SEQUENTIAL_MODE !PCON_CONCURRENT_MODE
+#define PCON_NORMAL_TRAIN_MODE !PCON_EXTENDED_TRAIN_MODE
+#define TIMEOUT_FRL_READY_MS 500
+#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000
+
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
+ u8 max_frl_bw_mask = 0, frl_trained_mask;
+ bool is_active;
+
+ ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
+ if (ret < 0)
+ return ret;
+
+ max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
+ drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
+
+ max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
+ drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);
+
+ max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);
+
+ if (max_frl_bw <= 0)
+ return -EINVAL;
+
+ ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
+ if (ret < 0)
+ return ret;
+ /* Wait for PCON to be FRL Ready */
+ wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);
+
+ if (!is_active)
+ return -ETIMEDOUT;
+
+ max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
+ ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, PCON_SEQUENTIAL_MODE);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, PCON_NORMAL_TRAIN_MODE);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
+ if (ret < 0)
+ return ret;
+ /*
+ * Wait for FRL to be completed
+ * Check if the HDMI Link is up and active.
+ */
+ wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);
+
+ if (!is_active)
+ return -ETIMEDOUT;
+
+ /* Verify HDMI Link configuration shows FRL Mode */
+ if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
+ DP_PCON_HDMI_MODE_FRL) {
+ drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
+ return -EINVAL;
+ }
+ drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);
+
+ intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
+ intel_dp->frl.is_trained = true;
+ drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);
+
+ return 0;
+}
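
The FRL bring-up above is a fixed AUX sequence: reset the FRL configuration, prepare, poll until the PCON reports FRL ready (500 ms budget), program the rate and bandwidth mask, enable FRL, poll until the HDMI link is active (1 s budget), then confirm the link mode is FRL rather than TMDS. As a rough illustration of the two polling steps only, here is a trivial poll-with-timeout helper in plain C; the kernel uses the wait_for() macro instead, and the readiness callback here is a stand-in, not a real DPCD access.

    /* Userspace sketch of "poll a status bit until ready or timeout". */
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static int fake_countdown = 3;

    static bool pcon_frl_ready(void)
    {
        /* Stand-in for drm_dp_pcon_is_frl_ready(); turns true after 3 polls */
        return --fake_countdown <= 0;
    }

    static bool poll_until(bool (*ready)(void), int timeout_ms)
    {
        while (timeout_ms-- > 0) {
            if (ready())
                return true;
            usleep(1000); /* 1 ms per iteration, purely illustrative */
        }
        return false;
    }

    int main(void)
    {
        printf("FRL ready: %s\n",
               poll_until(pcon_frl_ready, 500) ? "yes" : "timed out");
        return 0;
    }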
+
+static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
+{
+ if (drm_dp_is_branch(intel_dp->dpcd) &&
+ intel_dp->has_hdmi_sink &&
+ intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
+ return true;
+
+ return false;
+}
+
+void intel_dp_check_frl_training(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ /* Always go for FRL training if supported */
+ if (!intel_dp_is_hdmi_2_1_sink(intel_dp) ||
+ intel_dp->frl.is_trained)
+ return;
+
+ if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
+ int ret, mode;
+
+ drm_dbg(&dev_priv->drm, "Couldnt set FRL mode, continuing with TMDS mode\n");
+ ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
+ mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
+
+ if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
+ drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
+ } else {
+ drm_dbg(&dev_priv->drm, "FRL training Completed\n");
+ }
+}
+
+static int
+intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
+{
+ int vactive = crtc_state->hw.adjusted_mode.vdisplay;
+
+ return intel_hdmi_dsc_get_slice_height(vactive);
+}
+
+static int
+intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+ int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
+ int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
+ int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
+ int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
+
+ return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
+ pcon_max_slice_width,
+ hdmi_max_slices, hdmi_throughput);
+}
+
+static int
+intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int num_slices, int slice_width)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+ int output_format = crtc_state->output_format;
+ bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
+ int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
+ int hdmi_max_chunk_bytes =
+ connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
+
+ return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
+ num_slices, output_format, hdmi_all_bpp,
+ hdmi_max_chunk_bytes);
+}
+
+void
+intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 pps_param[6];
+ int slice_height;
+ int slice_width;
+ int num_slices;
+ int bits_per_pixel;
+ int ret;
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct drm_connector *connector;
+ bool hdmi_is_dsc_1_2;
+
+ if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
+ return;
+
+ if (!intel_connector)
+ return;
+ connector = &intel_connector->base;
+ hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;
+
+ if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
+ !hdmi_is_dsc_1_2)
+ return;
+
+ slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
+ if (!slice_height)
+ return;
+
+ num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
+ if (!num_slices)
+ return;
+
+ slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
+ num_slices);
+
+ bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
+ num_slices, slice_width);
+ if (!bits_per_pixel)
+ return;
+
+ pps_param[0] = slice_height & 0xFF;
+ pps_param[1] = slice_height >> 8;
+ pps_param[2] = slice_width & 0xFF;
+ pps_param[3] = slice_width >> 8;
+ pps_param[4] = bits_per_pixel & 0xFF;
+ pps_param[5] = (bits_per_pixel >> 8) & 0x3;
+
+ ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
+ if (ret < 0)
+ drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
+}
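
The six pps_param bytes sent through drm_dp_pcon_pps_override_param() are a little-endian packing of slice height, slice width and bits per pixel, two bytes each, with bpp capped to 10 bits. A short worked sketch with made-up geometry (3840x2160 split into 4 slices per line, 12 bpp) shows the resulting byte stream:

    /* Userspace sketch of the PPS override packing; the numbers are illustrative. */
    #include <stdio.h>

    int main(void)
    {
        int slice_height = 108;   /* e.g. 2160 active lines, 20 slices tall */
        int slice_width  = 960;   /* e.g. 3840 pixels across 4 slices */
        int bits_per_pixel = 12;
        unsigned char pps_param[6];
        int i;

        pps_param[0] = slice_height & 0xFF;          /* 0x6c */
        pps_param[1] = slice_height >> 8;            /* 0x00 */
        pps_param[2] = slice_width & 0xFF;           /* 0xc0 */
        pps_param[3] = slice_width >> 8;             /* 0x03 */
        pps_param[4] = bits_per_pixel & 0xFF;        /* 0x0c */
        pps_param[5] = (bits_per_pixel >> 8) & 0x3;  /* 0x00 */

        for (i = 0; i < 6; i++)
            printf("%02x ", pps_param[i]);
        printf("\n"); /* prints: 6c 00 c0 03 0c 00 */
        return 0;
    }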
+
static void
g4x_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
@@ -4010,7 +2813,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 tmp;
@@ -4029,8 +2833,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
enableddisabled(intel_dp->has_hdmi_sink));
- tmp = intel_dp->dfp.ycbcr_444_to_420 ?
- DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
+ tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
+ intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
@@ -4039,12 +2843,42 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
enableddisabled(intel_dp->dfp.ycbcr_444_to_420));
tmp = 0;
+ if (intel_dp->dfp.rgb_to_ycbcr) {
+ bool bt2020, bt709;
- if (drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_PROTOCOL_CONVERTER_CONTROL_2, tmp) <= 0)
+ /*
+ * FIXME: Currently if userspace selects BT2020 or BT709, but PCON supports only
+ * RGB->YCbCr for BT601 colorspace, we go ahead with BT601 as the default.
+ *
+ */
+ tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;
+
+ bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
+ bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
+ switch (crtc_state->infoframes.vsc.colorimetry) {
+ case DP_COLORIMETRY_BT2020_RGB:
+ case DP_COLORIMETRY_BT2020_YCC:
+ if (bt2020)
+ tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
+ break;
+ case DP_COLORIMETRY_BT709_YCC:
+ case DP_COLORIMETRY_XVYCC_709:
+ if (bt709)
+ tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
drm_dbg_kms(&i915->drm,
- "Failed to set protocol converter YCbCr 4:2:2 conversion mode to %s\n",
- enableddisabled(false));
+ "Failed to set protocol converter RGB->YCbCr conversion mode to %s\n",
+ enableddisabled(tmp ? true : false));
}
static void intel_enable_dp(struct intel_atomic_state *state,
@@ -4062,15 +2896,15 @@ static void intel_enable_dp(struct intel_atomic_state *state,
if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
return;
- with_pps_lock(intel_dp, wakeref) {
+ with_intel_pps_lock(intel_dp, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_init_panel_power_sequencer(encoder, pipe_config);
+ vlv_pps_init(encoder, pipe_config);
intel_dp_enable_port(intel_dp, pipe_config);
- edp_panel_vdd_on(intel_dp);
- edp_panel_on(intel_dp);
- edp_panel_vdd_off(intel_dp, true);
+ intel_pps_vdd_on_unlocked(intel_dp);
+ intel_pps_on_unlocked(intel_dp);
+ intel_pps_vdd_off_unlocked(intel_dp, true);
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -4084,7 +2918,9 @@ static void intel_enable_dp(struct intel_atomic_state *state,
}
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
- intel_dp_configure_protocol_converter(intel_dp);
+ intel_dp_configure_protocol_converter(intel_dp, pipe_config);
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
intel_dp_start_link_train(intel_dp, pipe_config);
intel_dp_stop_link_train(intel_dp, pipe_config);
@@ -4127,112 +2963,6 @@ static void g4x_pre_enable_dp(struct intel_atomic_state *state,
ilk_edp_pll_on(intel_dp, pipe_config);
}
-static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum pipe pipe = intel_dp->pps_pipe;
- i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
-
- if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
- return;
-
- edp_panel_vdd_off_sync(intel_dp);
-
- /*
- * VLV seems to get confused when multiple power sequencers
- * have the same port selected (even if only one has power/vdd
- * enabled). The failure manifests as vlv_wait_port_ready() failing.
- * CHV on the other hand doesn't seem to mind having the same port
- * selected in multiple power sequencers, but let's clear the
- * port select always when logically disconnecting a power sequencer
- * from a port.
- */
- drm_dbg_kms(&dev_priv->drm,
- "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name);
- intel_de_write(dev_priv, pp_on_reg, 0);
- intel_de_posting_read(dev_priv, pp_on_reg);
-
- intel_dp->pps_pipe = INVALID_PIPE;
-}
-
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- struct intel_encoder *encoder;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
- "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
- pipe_name(pipe), encoder->base.base.id,
- encoder->base.name);
-
- if (intel_dp->pps_pipe != pipe)
- continue;
-
- drm_dbg_kms(&dev_priv->drm,
- "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), encoder->base.base.id,
- encoder->base.name);
-
- /* make sure vdd is off before we steal it */
- vlv_detach_power_sequencer(intel_dp);
- }
-}
-
-static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
-
- if (intel_dp->pps_pipe != INVALID_PIPE &&
- intel_dp->pps_pipe != crtc->pipe) {
- /*
- * If another power sequencer was being used on this
- * port previously make sure to turn off vdd there while
- * we still have control of it.
- */
- vlv_detach_power_sequencer(intel_dp);
- }
-
- /*
- * We may be stealing the power
- * sequencer from another port.
- */
- vlv_steal_power_sequencer(dev_priv, crtc->pipe);
-
- intel_dp->active_pipe = crtc->pipe;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- /* now it's all ours */
- intel_dp->pps_pipe = crtc->pipe;
-
- drm_dbg_kms(&dev_priv->drm,
- "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
- encoder->base.name);
-
- /* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
-}
-
static void vlv_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
@@ -4632,22 +3362,19 @@ ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
-void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static char dp_training_pattern_name(u8 train_pat)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u8 train_set = intel_dp->train_set[0];
-
- drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
- train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
- train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
- drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
- (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
- DP_TRAIN_PRE_EMPHASIS_SHIFT,
- train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
- " (max)" : "");
-
- intel_dp->set_signal_levels(intel_dp, crtc_state);
+ switch (train_pat) {
+ case DP_TRAINING_PATTERN_1:
+ case DP_TRAINING_PATTERN_2:
+ case DP_TRAINING_PATTERN_3:
+ return '0' + train_pat;
+ case DP_TRAINING_PATTERN_4:
+ return '4';
+ default:
+ MISSING_CASE(train_pat);
+ return '?';
+ }
}
void
@@ -4655,13 +3382,15 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
- if ((intel_dp_training_pattern_symbol(dp_train_pat)) !=
- DP_TRAINING_PATTERN_DISABLE)
+ if (train_pat != DP_TRAINING_PATTERN_DISABLE)
drm_dbg_kms(&dev_priv->drm,
- "Using DP training pattern TPS%d\n",
- intel_dp_training_pattern_symbol(dp_train_pat));
+ "[ENCODER:%d:%s] Using DP training pattern TPS%c\n",
+ encoder->base.base.id, encoder->base.name,
+ dp_training_pattern_name(train_pat));
intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}
@@ -4727,15 +3456,15 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
- msleep(intel_dp->panel_power_down_delay);
+ msleep(intel_dp->pps.panel_power_down_delay);
intel_dp->DP = DP;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_wakeref_t wakeref;
- with_pps_lock(intel_dp, wakeref)
- intel_dp->active_pipe = INVALID_PIPE;
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_dp->pps.active_pipe = INVALID_PIPE;
}
}
@@ -4865,6 +3594,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
intel_dp_get_dsc_sink_cap(intel_dp);
+ /*
+ * If needed, program our source OUI so we can make various Intel-specific AUX services
+ * available (such as HDR backlight controls)
+ */
+ intel_edp_init_source_oui(intel_dp, true);
+
return true;
}
@@ -5698,7 +4433,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
intel_dp_phy_pattern_update(intel_dp, crtc_state);
@@ -5771,6 +4506,17 @@ update_status:
"Could not write test response to sink\n");
}
+static void
+intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled)
+{
+ drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled);
+
+ if (esi[1] & DP_CP_IRQ) {
+ intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
+ *handled = true;
+ }
+}
+
/**
* intel_dp_check_mst_status - service any pending MST interrupts, check link status
* @intel_dp: Intel DP struct
@@ -5815,7 +4561,8 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
- drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+ intel_dp_mst_hpd_irq(intel_dp, esi, &handled);
+
if (!handled)
break;
@@ -5833,6 +4580,28 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
return link_ok;
}
+static void
+intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
+{
+ bool is_active;
+ u8 buf = 0;
+
+ is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
+ if (intel_dp->frl.is_trained && !is_active) {
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
+ return;
+
+ buf &= ~DP_PCON_ENABLE_HDMI_LINK;
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
+ return;
+
+ drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
+
+ /* Restart FRL training or fall back to TMDS mode */
+ intel_dp_check_frl_training(intel_dp);
+ }
+}
+
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
@@ -6006,6 +4775,8 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
!intel_dp_mst_is_master_trans(crtc_state))
continue;
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
intel_dp_start_link_train(intel_dp, crtc_state);
intel_dp_stop_link_train(intel_dp, crtc_state);
break;
@@ -6115,7 +4886,7 @@ static int intel_dp_do_phy_test(struct intel_encoder *encoder,
return 0;
}
-static void intel_dp_phy_test(struct intel_encoder *encoder)
+void intel_dp_phy_test(struct intel_encoder *encoder)
{
struct drm_modeset_acquire_ctx ctx;
int ret;
@@ -6197,7 +4968,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
return state;
}
-static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
+static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 val;
@@ -6221,6 +4992,30 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
+static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 val;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
+ drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
+ return;
+ }
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
+ drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
+ return;
+ }
+
+ if (val & HDMI_LINK_STATUS_CHANGED)
+ intel_dp_handle_hdmi_link_status_change(intel_dp);
+}
+
/*
* According to DP spec
* 5.1.2:
@@ -6260,7 +5055,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
return false;
}
- intel_dp_check_service_irq(intel_dp);
+ intel_dp_check_device_service_irq(intel_dp);
+ intel_dp_check_link_service_irq(intel_dp);
/* Handle CEC interrupts, if any */
drm_dp_cec_irq(&intel_dp->aux);
@@ -6480,13 +5276,20 @@ intel_dp_update_dfp(struct intel_dp *intel_dp,
intel_dp->downstream_ports,
edid);
+ intel_dp->dfp.pcon_max_frl_bw =
+ drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
+ intel_dp->downstream_ports);
+
drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d\n",
+ "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
connector->base.base.id, connector->base.name,
intel_dp->dfp.max_bpc,
intel_dp->dfp.max_dotclock,
intel_dp->dfp.min_tmds_clock,
- intel_dp->dfp.max_tmds_clock);
+ intel_dp->dfp.max_tmds_clock,
+ intel_dp->dfp.pcon_max_frl_bw);
+
+ intel_dp_get_pcon_dsc_cap(intel_dp);
}
static void
@@ -6494,7 +5297,7 @@ intel_dp_update_420(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420;
+ bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;
/* No YCbCr output support on gmch platforms */
if (HAS_GMCH(i915))
@@ -6516,14 +5319,26 @@ intel_dp_update_420(struct intel_dp *intel_dp)
dp_to_dig_port(intel_dp)->lspcon.active ||
drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
intel_dp->downstream_ports);
+ rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
+ DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
+ DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
if (INTEL_GEN(i915) >= 11) {
+ /* Let PCON convert from RGB->YCbCr if possible */
+ if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
+ intel_dp->dfp.rgb_to_ycbcr = true;
+ intel_dp->dfp.ycbcr_444_to_420 = true;
+ connector->base.ycbcr_420_allowed = true;
+ } else {
/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
- intel_dp->dfp.ycbcr_444_to_420 =
- ycbcr_444_to_420 && !ycbcr_420_passthrough;
+ intel_dp->dfp.ycbcr_444_to_420 =
+ ycbcr_444_to_420 && !ycbcr_420_passthrough;
- connector->base.ycbcr_420_allowed =
- !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
+ connector->base.ycbcr_420_allowed =
+ !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
+ }
} else {
/* 4:4:4->4:2:0 conversion is the only way */
intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;
@@ -6532,8 +5347,9 @@ intel_dp_update_420(struct intel_dp *intel_dp)
}
drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
+ "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
connector->base.base.id, connector->base.name,
+ yesno(intel_dp->dfp.rgb_to_ycbcr),
yesno(connector->base.ycbcr_420_allowed),
yesno(intel_dp->dfp.ycbcr_444_to_420));
}
@@ -6557,7 +5373,6 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
}
drm_dp_cec_set_edid(&intel_dp->aux, edid);
- intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
static void
@@ -6571,13 +5386,14 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_dp->has_hdmi_sink = false;
intel_dp->has_audio = false;
- intel_dp->edid_quirks = 0;
intel_dp->dfp.max_bpc = 0;
intel_dp->dfp.max_dotclock = 0;
intel_dp->dfp.min_tmds_clock = 0;
intel_dp->dfp.max_tmds_clock = 0;
+ intel_dp->dfp.pcon_max_frl_bw = 0;
+
intel_dp->dfp.ycbcr_444_to_420 = false;
connector->base.ycbcr_420_allowed = false;
}
@@ -6683,7 +5499,7 @@ intel_dp_detect(struct drm_connector *connector,
to_intel_connector(connector)->detect_edid)
status = connector_status_connected;
- intel_dp_check_service_irq(intel_dp);
+ intel_dp_check_device_service_irq(intel_dp);
out:
if (status != connector_status_connected && !intel_dp->is_mst)
@@ -6736,6 +5552,10 @@ static int intel_dp_get_modes(struct drm_connector *connector)
edid = intel_connector->detect_edid;
if (edid) {
int ret = intel_connector_update_modes(connector, edid);
+
+ if (intel_vrr_is_capable(connector))
+ drm_connector_set_vrr_capable_property(connector,
+ true);
if (ret)
return ret;
}
@@ -6774,6 +5594,8 @@ intel_dp_connector_register(struct drm_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
int ret;
ret = intel_connector_register(connector);
@@ -6787,6 +5609,22 @@ intel_dp_connector_register(struct drm_connector *connector)
ret = drm_dp_aux_register(&intel_dp->aux);
if (!ret)
drm_dp_cec_register_connector(&intel_dp->aux, connector);
+
+ if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
+ return ret;
+
+ /*
+ * ToDo: Clean this up to handle lspcon init and resume more
+ * efficiently and streamlined.
+ */
+ if (lspcon_init(dig_port)) {
+ lspcon_detect_hdr_capability(lspcon);
+ if (lspcon->hdr_supported)
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property,
+ 0);
+ }
+
return ret;
}
@@ -6806,17 +5644,8 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
struct intel_dp *intel_dp = &dig_port->dp;
intel_dp_mst_encoder_cleanup(dig_port);
- if (intel_dp_is_edp(intel_dp)) {
- intel_wakeref_t wakeref;
- cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- /*
- * vdd might still be enabled due to the delayed vdd off.
- * Make sure vdd is actually turned off here.
- */
- with_pps_lock(intel_dp, wakeref)
- edp_panel_vdd_off_sync(intel_dp);
- }
+ intel_pps_vdd_off_sync(intel_dp);
intel_dp_aux_fini(intel_dp);
}
@@ -6832,53 +5661,15 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
- intel_wakeref_t wakeref;
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- /*
- * vdd might still be enabled due to the delayed vdd off.
- * Make sure vdd is actually turned off here.
- */
- cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- with_pps_lock(intel_dp, wakeref)
- edp_panel_vdd_off_sync(intel_dp);
+ intel_pps_vdd_off_sync(intel_dp);
}
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
- intel_wakeref_t wakeref;
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref)
- wait_panel_power_cycle(intel_dp);
-}
-
-static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!edp_have_panel_vdd(intel_dp))
- return;
-
- /*
- * The VDD bit needs a power domain reference, so if the bit is
- * already enabled when we boot or resume, grab this reference and
- * schedule a vdd off, so we don't hold on to the reference
- * indefinitely.
- */
- drm_dbg_kms(&dev_priv->drm,
- "VDD left on by BIOS, adjusting state tracking\n");
- intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
-
- edp_panel_vdd_schedule_off(intel_dp);
+ intel_pps_wait_power_cycle(intel_dp);
}
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
@@ -6898,30 +5689,20 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
- intel_wakeref_t wakeref;
if (!HAS_DDI(dev_priv))
intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
intel_dp->reset_link_params = true;
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_wakeref_t wakeref;
- if (intel_dp_is_edp(intel_dp)) {
- /*
- * Reinit the power sequencer, in case BIOS did
- * something nasty with it.
- */
- intel_dp_pps_init(intel_dp);
- intel_edp_panel_vdd_sanitize(intel_dp);
- }
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
}
+
+ intel_pps_encoder_reset(intel_dp);
}
static int intel_modeset_tile_group(struct intel_atomic_state *state,
@@ -7086,19 +5867,6 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_encoder_destroy,
};
-static bool intel_edp_have_power(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
- bool have_power = false;
-
- with_pps_lock(intel_dp, wakeref) {
- have_power = edp_have_panel_power(intel_dp) &&
- edp_have_panel_vdd(intel_dp);
- }
-
- return have_power;
-}
-
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
@@ -7106,7 +5874,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
struct intel_dp *intel_dp = &dig_port->dp;
if (dig_port->base.type == INTEL_OUTPUT_EDP &&
- (long_hpd || !intel_edp_have_power(intel_dp))) {
+ (long_hpd || !intel_pps_have_power(intel_dp))) {
/*
* vdd off can generate a long/short pulse on eDP which
* would require vdd on to handle it, and thus we
@@ -7175,7 +5943,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
else if (INTEL_GEN(dev_priv) >= 5)
drm_connector_attach_max_bpc_property(connector, 6, 12);
- intel_attach_colorspace_property(connector);
+ /* Register HDMI colorspace for case of lspcon */
+ if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ drm_connector_attach_content_type_property(connector);
+ intel_attach_hdmi_colorspace_property(connector);
+ } else {
+ intel_attach_dp_colorspace_property(connector);
+ }
if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
drm_object_attach_property(&connector->base,
@@ -7194,277 +5968,9 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
}
-}
-
-static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
-{
- intel_dp->panel_power_off_time = ktime_get_boottime();
- intel_dp->last_power_on = jiffies;
- intel_dp->last_backlight_off = jiffies;
-}
-
-static void
-intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp_on, pp_off, pp_ctl;
- struct pps_registers regs;
-
- intel_pps_get_registers(intel_dp, &regs);
-
- pp_ctl = ilk_get_pp_control(intel_dp);
-
- /* Ensure PPS is unlocked */
- if (!HAS_DDI(dev_priv))
- intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
-
- pp_on = intel_de_read(dev_priv, regs.pp_on);
- pp_off = intel_de_read(dev_priv, regs.pp_off);
-
- /* Pull timing values out of registers */
- seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
- seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
- seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
- seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
-
- if (i915_mmio_reg_valid(regs.pp_div)) {
- u32 pp_div;
-
- pp_div = intel_de_read(dev_priv, regs.pp_div);
-
- seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
- } else {
- seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
- }
-}
-
-static void
-intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
-{
- DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- state_name,
- seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
-}
-static void
-intel_pps_verify_state(struct intel_dp *intel_dp)
-{
- struct edp_power_seq hw;
- struct edp_power_seq *sw = &intel_dp->pps_delays;
-
- intel_pps_readout_hw_state(intel_dp, &hw);
-
- if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
- hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
- DRM_ERROR("PPS state mismatch\n");
- intel_pps_dump_state("sw", sw);
- intel_pps_dump_state("hw", &hw);
- }
-}
-
-static void
-intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct edp_power_seq cur, vbt, spec,
- *final = &intel_dp->pps_delays;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* already initialized? */
- if (final->t11_t12 != 0)
- return;
-
- intel_pps_readout_hw_state(intel_dp, &cur);
-
- intel_pps_dump_state("cur", &cur);
-
- vbt = dev_priv->vbt.edp.pps;
- /* On Toshiba Satellite P50-C-18C system the VBT T12 delay
- * of 500ms appears to be too short. Occasionally the panel
- * just fails to power back on. Increasing the delay to 800ms
- * seems sufficient to avoid this problem.
- */
- if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
- vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
- drm_dbg_kms(&dev_priv->drm,
- "Increasing T12 panel delay as per the quirk to %d\n",
- vbt.t11_t12);
- }
- /* T11_T12 delay is special and actually in units of 100ms, but zero
- * based in the hw (so we need to add 100 ms). But the sw vbt
- * table multiplies it with 1000 to make it in units of 100usec,
- * too. */
- vbt.t11_t12 += 100 * 10;
-
- /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
- * our hw here, which are all in 100usec. */
- spec.t1_t3 = 210 * 10;
- spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
- spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
- spec.t10 = 500 * 10;
- /* This one is special and actually in units of 100ms, but zero
- * based in the hw (so we need to add 100 ms). But the sw vbt
- * table multiplies it with 1000 to make it in units of 100usec,
- * too. */
- spec.t11_t12 = (510 + 100) * 10;
-
- intel_pps_dump_state("vbt", &vbt);
-
- /* Use the max of the register settings and vbt. If both are
- * unset, fall back to the spec limits. */
-#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
- spec.field : \
- max(cur.field, vbt.field))
- assign_final(t1_t3);
- assign_final(t8);
- assign_final(t9);
- assign_final(t10);
- assign_final(t11_t12);
-#undef assign_final
-
-#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
- intel_dp->panel_power_up_delay = get_delay(t1_t3);
- intel_dp->backlight_on_delay = get_delay(t8);
- intel_dp->backlight_off_delay = get_delay(t9);
- intel_dp->panel_power_down_delay = get_delay(t10);
- intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
-#undef get_delay
-
- drm_dbg_kms(&dev_priv->drm,
- "panel power up delay %d, power down delay %d, power cycle delay %d\n",
- intel_dp->panel_power_up_delay,
- intel_dp->panel_power_down_delay,
- intel_dp->panel_power_cycle_delay);
-
- drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
- intel_dp->backlight_on_delay,
- intel_dp->backlight_off_delay);
-
- /*
- * We override the HW backlight delays to 1 because we do manual waits
- * on them. For T8, even BSpec recommends doing it. For T9, if we
- * don't do this, we'll end up waiting for the backlight off delay
- * twice: once when we do the manual sleep, and once when we disable
- * the panel and wait for the PP_STATUS bit to become zero.
- */
- final->t8 = 1;
- final->t9 = 1;
-
- /*
- * HW has only a 100msec granularity for t11_t12 so round it up
- * accordingly.
- */
- final->t11_t12 = roundup(final->t11_t12, 100 * 10);
-}
-
-static void
-intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
- bool force_disable_vdd)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp_on, pp_off, port_sel = 0;
- int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
- struct pps_registers regs;
- enum port port = dp_to_dig_port(intel_dp)->base.port;
- const struct edp_power_seq *seq = &intel_dp->pps_delays;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- intel_pps_get_registers(intel_dp, &regs);
-
- /*
- * On some VLV machines the BIOS can leave the VDD
- * enabled even on power sequencers which aren't
- * hooked up to any port. This would mess up the
- * power domain tracking the first time we pick
- * one of these power sequencers for use since
- * edp_panel_vdd_on() would notice that the VDD was
- * already on and therefore wouldn't grab the power
- * domain reference. Disable VDD first to avoid this.
- * This also avoids spuriously turning the VDD on as
- * soon as the new power sequencer gets initialized.
- */
- if (force_disable_vdd) {
- u32 pp = ilk_get_pp_control(intel_dp);
-
- drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
- "Panel power already on\n");
-
- if (pp & EDP_FORCE_VDD)
- drm_dbg_kms(&dev_priv->drm,
- "VDD already on, disabling first\n");
-
- pp &= ~EDP_FORCE_VDD;
-
- intel_de_write(dev_priv, regs.pp_ctrl, pp);
- }
-
- pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
- REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
- pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
-
- /* Haswell doesn't have any port selection bits for the panel
- * power sequencer any more. */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- port_sel = PANEL_PORT_SELECT_VLV(port);
- } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- switch (port) {
- case PORT_A:
- port_sel = PANEL_PORT_SELECT_DPA;
- break;
- case PORT_C:
- port_sel = PANEL_PORT_SELECT_DPC;
- break;
- case PORT_D:
- port_sel = PANEL_PORT_SELECT_DPD;
- break;
- default:
- MISSING_CASE(port);
- break;
- }
- }
-
- pp_on |= port_sel;
-
- intel_de_write(dev_priv, regs.pp_on, pp_on);
- intel_de_write(dev_priv, regs.pp_off, pp_off);
-
- /*
- * Compute the divisor for the pp clock, simply match the Bspec formula.
- */
- if (i915_mmio_reg_valid(regs.pp_div)) {
- intel_de_write(dev_priv, regs.pp_div,
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
- } else {
- u32 pp_ctl;
-
- pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
- pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
- pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
- intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
- intel_de_read(dev_priv, regs.pp_on),
- intel_de_read(dev_priv, regs.pp_off),
- i915_mmio_reg_valid(regs.pp_div) ?
- intel_de_read(dev_priv, regs.pp_div) :
- (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
-}
-
-static void intel_dp_pps_init(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_initial_power_sequencer_setup(intel_dp);
- } else {
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
- }
+ if (HAS_VRR(dev_priv))
+ drm_connector_attach_vrr_capable_property(connector);
}
/**
@@ -7903,14 +6409,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
enum pipe pipe = INVALID_PIPE;
- intel_wakeref_t wakeref;
struct edid *edid;
if (!intel_dp_is_edp(intel_dp))
return true;
- INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
-
/*
* On IBX/CPT we may get here with LVDS already registered. Since the
* driver uses the only internal power sequencer available for both
@@ -7926,11 +6429,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return false;
}
- with_pps_lock(intel_dp, wakeref) {
- intel_dp_init_panel_power_timestamps(intel_dp);
- intel_dp_pps_init(intel_dp);
- intel_edp_panel_vdd_sanitize(intel_dp);
- }
+ intel_pps_init(intel_dp);
/* Cache DPCD and EDID for edp. */
has_dpcd = intel_edp_init_dpcd(intel_dp);
@@ -7947,7 +6446,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
drm_connector_update_edid_property(connector, edid);
- intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
} else {
kfree(edid);
edid = ERR_PTR(-EINVAL);
@@ -7975,7 +6473,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
pipe = vlv_active_pipe(intel_dp);
if (pipe != PIPE_A && pipe != PIPE_B)
- pipe = intel_dp->pps_pipe;
+ pipe = intel_dp->pps.pps_pipe;
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = PIPE_A;
@@ -7986,7 +6484,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
- intel_connector->panel.backlight.power = intel_edp_backlight_power;
+ intel_connector->panel.backlight.power = intel_pps_backlight_power;
intel_panel_setup_backlight(connector, pipe);
if (fixed_mode) {
@@ -7998,13 +6496,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return true;
out_vdd_off:
- cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- /*
- * vdd might still be enabled due to the delayed vdd off.
- * Make sure vdd is actually turned off here.
- */
- with_pps_lock(intel_dp, wakeref)
- edp_panel_vdd_off_sync(intel_dp);
+ intel_pps_vdd_off_sync(intel_dp);
return false;
}
@@ -8058,8 +6550,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp_set_source_rates(intel_dp);
intel_dp->reset_link_params = true;
- intel_dp->pps_pipe = INVALID_PIPE;
- intel_dp->active_pipe = INVALID_PIPE;
+ intel_dp->pps.pps_pipe = INVALID_PIPE;
+ intel_dp->pps.active_pipe = INVALID_PIPE;
/* Preserve the current hw state. */
intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
@@ -8077,7 +6569,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+ intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
/*
* For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
@@ -8146,6 +6638,9 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
(temp & ~0xf) | 0xd);
}
+ intel_dp->frl.is_trained = false;
+ intel_dp->frl.trained_rate_gbps = 0;
+
return true;
fail:
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index b871a09b6901..d80839139bfb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -51,7 +51,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx);
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp);
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable);
@@ -69,16 +70,11 @@ enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
-void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void intel_edp_panel_on(struct intel_dp *intel_dp);
-void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
-void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
-u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
@@ -95,9 +91,6 @@ void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat);
-void
-intel_dp_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
@@ -143,5 +136,11 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
void intel_dp_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+const struct dpll *vlv_get_dpll(struct drm_i915_private *i915);
+
+void intel_dp_check_frl_training(struct intel_dp *intel_dp);
+void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_dp_phy_test(struct intel_encoder *encoder);
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
new file mode 100644
index 000000000000..eaebf123310a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_display_types.h"
+#include "intel_dp_aux.h"
+#include "intel_pps.h"
+#include "intel_tc.h"
+
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
+{
+ int i;
+ u32 v = 0;
+
+ if (src_bytes > 4)
+ src_bytes = 4;
+ for (i = 0; i < src_bytes; i++)
+ v |= ((u32)src[i]) << ((3 - i) * 8);
+ return v;
+}
+
+static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
+{
+ int i;
+
+ if (dst_bytes > 4)
+ dst_bytes = 4;
+ for (i = 0; i < dst_bytes; i++)
+ dst[i] = src >> ((3 - i) * 8);
+}
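As a quick illustration of the byte order used by the pack/unpack helpers above (bytes land MSB-first within the 32-bit AUX data register), here is a minimal standalone sketch; the message bytes are made up and standard C types stand in for the kernel's u8/u32:

	#include <assert.h>
	#include <stdint.h>

	static uint32_t pack_aux(const uint8_t *src, int src_bytes)
	{
		uint32_t v = 0;
		int i;

		if (src_bytes > 4)
			src_bytes = 4;
		for (i = 0; i < src_bytes; i++)
			v |= ((uint32_t)src[i]) << ((3 - i) * 8);
		return v;
	}

	int main(void)
	{
		const uint8_t msg[2] = { 0x12, 0x34 };

		/* Two bytes land in the two most significant byte lanes. */
		assert(pack_aux(msg, 2) == 0x12340000u);
		return 0;
	}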
+
+static u32
+intel_dp_aux_wait_done(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ const unsigned int timeout_ms = 10;
+ u32 status;
+ bool done;
+
+#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ done = wait_event_timeout(i915->gmbus_wait_queue, C,
+ msecs_to_jiffies_timeout(timeout_ms));
+
+ /* just trace the final value */
+ trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+
+ if (!done)
+ drm_err(&i915->drm,
+ "%s: did not complete or timeout within %ums (status 0x%08x)\n",
+ intel_dp->aux.name, timeout_ms, status);
+#undef C
+
+ return status;
+}
+
+static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (index)
+ return 0;
+
+ /*
+ * The clock divider is based off the hrawclk, and would like to run at
+ * 2MHz. So, take the hrawclk value and divide by 2000 and use that
+ */
+ return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
+}
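To make the divider math above concrete: rawclk_freq is expressed in kHz, so dividing by 2000 targets a roughly 2 MHz AUX bit clock. A small sketch under the assumption of a 100 MHz raw clock (the macro is a simplified stand-in for the kernel's DIV_ROUND_CLOSEST):

	#include <stdio.h>

	/* Simplified stand-in for the kernel macro, unsigned case only. */
	#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

	int main(void)
	{
		unsigned int rawclk_khz = 100000; /* assumed 100 MHz hrawclk */

		/* 100000 / 2000 = 50, i.e. the AUX clock runs at ~2 MHz. */
		printf("aux clock divider = %u\n",
		       DIV_ROUND_CLOSEST(rawclk_khz, 2000));
		return 0;
	}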
+
+static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 freq;
+
+ if (index)
+ return 0;
+
+ /*
+ * The clock divider is based off the cdclk or PCH rawclk, and would
+ * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
+ * divide by 2000 and use that
+ */
+ if (dig_port->aux_ch == AUX_CH_A)
+ freq = dev_priv->cdclk.hw.cdclk;
+ else
+ freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
+ return DIV_ROUND_CLOSEST(freq, 2000);
+}
+
+static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+
+ if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
+ /* Workaround for non-ULT HSW */
+ switch (index) {
+ case 0: return 63;
+ case 1: return 72;
+ default: return 0;
+ }
+ }
+
+ return ilk_get_aux_clock_divider(intel_dp, index);
+}
+
+static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ /*
+ * SKL doesn't need us to program the AUX clock divider (Hardware will
+ * derive the clock from CDCLK automatically). We still implement the
+ * get_aux_clock_divider vfunc to plug into the existing code.
+ */
+ return index ? 0 : 1;
+}
+
+static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+ int send_bytes,
+ u32 aux_clock_divider)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv =
+ to_i915(dig_port->base.base.dev);
+ u32 precharge, timeout;
+
+ if (IS_GEN(dev_priv, 6))
+ precharge = 3;
+ else
+ precharge = 5;
+
+ if (IS_BROADWELL(dev_priv))
+ timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
+ else
+ timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
+
+ return DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_INTERRUPT |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ timeout |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
+}
+
+static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+ int send_bytes,
+ u32 unused)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 =
+ to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ u32 ret;
+
+ ret = DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_INTERRUPT |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_TIME_OUT_MAX |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
+ DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+
+ if (intel_phy_is_tc(i915, phy) &&
+ dig_port->tc_mode == TC_PORT_TBT_ALT)
+ ret |= DP_AUX_CH_CTL_TBT_IO;
+
+ return ret;
+}
+
+static int
+intel_dp_aux_xfer(struct intel_dp *intel_dp,
+ const u8 *send, int send_bytes,
+ u8 *recv, int recv_size,
+ u32 aux_send_ctl_flags)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 =
+ to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ bool is_tc_port = intel_phy_is_tc(i915, phy);
+ i915_reg_t ch_ctl, ch_data[5];
+ u32 aux_clock_divider;
+ enum intel_display_power_domain aux_domain;
+ intel_wakeref_t aux_wakeref;
+ intel_wakeref_t pps_wakeref;
+ int i, ret, recv_bytes;
+ int try, clock = 0;
+ u32 status;
+ bool vdd;
+
+ ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ for (i = 0; i < ARRAY_SIZE(ch_data); i++)
+ ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
+
+ if (is_tc_port)
+ intel_tc_port_lock(dig_port);
+
+ aux_domain = intel_aux_power_domain(dig_port);
+
+ aux_wakeref = intel_display_power_get(i915, aux_domain);
+ pps_wakeref = intel_pps_lock(intel_dp);
+
+ /*
+ * We will be called with VDD already enabled for dpcd/edid/oui reads.
+ * In such cases we want to leave VDD enabled and it's up to upper layers
+ * to turn it off. But for eg. i2c-dev access we need to turn it on/off
+ * ourselves.
+ */
+ vdd = intel_pps_vdd_on_unlocked(intel_dp);
+
+ /*
+ * dp aux is extremely sensitive to irq latency, hence request the
+ * lowest possible wakeup latency and so prevent the cpu from going into
+ * deep sleep states.
+ */
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
+
+ intel_pps_check_power_unlocked(intel_dp);
+
+ /* Try to wait for any previous AUX channel activity */
+ for (try = 0; try < 3; try++) {
+ status = intel_uncore_read_notrace(uncore, ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+ /* just trace the final value */
+ trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+
+ if (try == 3) {
+ const u32 status = intel_uncore_read(uncore, ch_ctl);
+
+ if (status != intel_dp->aux_busy_last_status) {
+ drm_WARN(&i915->drm, 1,
+ "%s: not started (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ intel_dp->aux_busy_last_status = status;
+ }
+
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Only 5 data registers! */
+ if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
+ u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
+ send_bytes,
+ aux_clock_divider);
+
+ send_ctl |= aux_send_ctl_flags;
+
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4)
+ intel_uncore_write(uncore,
+ ch_data[i >> 2],
+ intel_dp_pack_aux(send + i,
+ send_bytes - i));
+
+ /* Send the command and wait for it to complete */
+ intel_uncore_write(uncore, ch_ctl, send_ctl);
+
+ status = intel_dp_aux_wait_done(intel_dp);
+
+ /* Clear done status and any errors */
+ intel_uncore_write(uncore,
+ ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ /*
+ * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
+ * 400us delay required for errors and timeouts
+ * Timeout errors from the HW already meet this
+ * requirement so skip to next iteration
+ */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
+ continue;
+
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ usleep_range(400, 500);
+ continue;
+ }
+ if (status & DP_AUX_CH_CTL_DONE)
+ goto done;
+ }
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+ drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ ret = -EBUSY;
+ goto out;
+ }
+
+done:
+ /*
+ * Check for timeout or receive error. Timeouts occur when the sink is
+ * not connected.
+ */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * Timeouts occur when the device isn't connected, so they're "normal"
+ * -- don't fill the kernel log with these
+ */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+ drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* Unload any bytes sent back from the other side */
+ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+
+ /*
+ * By BSpec: "Message sizes of 0 or >20 are not allowed."
+ * We have no idea what happened, so we return -EBUSY and let the
+ * drm layer take care of the necessary retries.
+ */
+ if (recv_bytes == 0 || recv_bytes > 20) {
+ drm_dbg_kms(&i915->drm,
+ "%s: Forbidden recv_bytes = %d on aux transaction\n",
+ intel_dp->aux.name, recv_bytes);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ for (i = 0; i < recv_bytes; i += 4)
+ intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
+ recv + i, recv_bytes - i);
+
+ ret = recv_bytes;
+out:
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ if (vdd)
+ intel_pps_vdd_off_unlocked(intel_dp, false);
+
+ intel_pps_unlock(intel_dp, pps_wakeref);
+ intel_display_power_put_async(i915, aux_domain, aux_wakeref);
+
+ if (is_tc_port)
+ intel_tc_port_unlock(dig_port);
+
+ return ret;
+}
+
+#define BARE_ADDRESS_SIZE 3
+#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
+
+static void
+intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
+ const struct drm_dp_aux_msg *msg)
+{
+ txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
+ txbuf[1] = (msg->address >> 8) & 0xff;
+ txbuf[2] = msg->address & 0xff;
+ txbuf[3] = msg->size - 1;
+}
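To make the four-byte AUX header layout concrete, here is a small standalone sketch building the same bytes for an assumed native read of one byte at DPCD address 0x600 (DP_AUX_NATIVE_READ is 0x9 in the DP spec; the address and size are just example values):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t address = 0x600;	/* example DPCD address (SET_POWER) */
		uint8_t request = 0x9;		/* DP_AUX_NATIVE_READ */
		uint8_t size = 1;		/* read one byte */
		uint8_t txbuf[4];

		txbuf[0] = (request << 4) | ((address >> 16) & 0xf);
		txbuf[1] = (address >> 8) & 0xff;
		txbuf[2] = address & 0xff;
		txbuf[3] = size - 1;		/* field holds "length minus one" */

		assert(txbuf[0] == 0x90 && txbuf[1] == 0x06 &&
		       txbuf[2] == 0x00 && txbuf[3] == 0x00);
		return 0;
	}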
+
+static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
+{
+ /*
+ * If we're trying to send the HDCP Aksv, we need to set the Aksv
+ * select bit to inform the hardware to send the Aksv after our header
+ * since we can't access that data from software.
+ */
+ if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
+ msg->address == DP_AUX_HDCP_AKSV)
+ return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
+
+ return 0;
+}
+
+static ssize_t
+intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 txbuf[20], rxbuf[20];
+ size_t txsize, rxsize;
+ u32 flags = intel_dp_aux_xfer_flags(msg);
+ int ret;
+
+ intel_dp_aux_header(txbuf, msg);
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+ txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
+ rxsize = 2; /* 0 or 1 data bytes */
+
+ if (drm_WARN_ON(&i915->drm, txsize > 20))
+ return -E2BIG;
+
+ drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
+
+ if (msg->buffer)
+ memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
+
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+ rxbuf, rxsize, flags);
+ if (ret > 0) {
+ msg->reply = rxbuf[0] >> 4;
+
+ if (ret > 1) {
+ /* Number of bytes written in a short write. */
+ ret = clamp_t(int, rxbuf[1], 0, msg->size);
+ } else {
+ /* Return payload size. */
+ ret = msg->size;
+ }
+ }
+ break;
+
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
+ rxsize = msg->size + 1;
+
+ if (drm_WARN_ON(&i915->drm, rxsize > 20))
+ return -E2BIG;
+
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+ rxbuf, rxsize, flags);
+ if (ret > 0) {
+ msg->reply = rxbuf[0] >> 4;
+ /*
+ * Assume happy day, and copy the data. The caller is
+ * expected to check msg->reply before touching it.
+ *
+ * Return payload size.
+ */
+ ret--;
+ memcpy(msg->buffer, rxbuf + 1, ret);
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_B);
+ }
+}
+
+static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_B, index);
+ }
+}
+
+static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ return DP_AUX_CH_CTL(aux_ch);
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return PCH_DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return PCH_DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ case AUX_CH_E:
+ case AUX_CH_F:
+ return DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ case AUX_CH_E:
+ case AUX_CH_F:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ case AUX_CH_USBC5:
+ case AUX_CH_USBC6:
+ return DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ case AUX_CH_USBC5:
+ case AUX_CH_USBC6:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+void intel_dp_aux_fini(struct intel_dp *intel_dp)
+{
+ if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
+ cpu_latency_qos_remove_request(&intel_dp->pm_qos);
+
+ kfree(intel_dp->aux.name);
+}
+
+void intel_dp_aux_init(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
+ } else if (INTEL_GEN(dev_priv) >= 9) {
+ intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = skl_aux_data_reg;
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
+ } else {
+ intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
+ else
+ intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+ else
+ intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
+
+ drm_dp_aux_init(&intel_dp->aux);
+
+ /* Failure to allocate our preferred name is not critical */
+ if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
+ aux_ch - AUX_CH_USBC1 + '1',
+ encoder->base.name);
+ else
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
+ aux_ch_name(aux_ch),
+ encoder->base.name);
+
+ intel_dp->aux.transfer = intel_dp_aux_transfer;
+ cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
new file mode 100644
index 000000000000..4afbe76217b9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_AUX_H__
+#define __INTEL_DP_AUX_H__
+
+#include <linux/types.h>
+
+struct intel_dp;
+
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
+
+void intel_dp_aux_fini(struct intel_dp *intel_dp);
+void intel_dp_aux_init(struct intel_dp *intel_dp);
+
+#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 51d27fc98d48..651884390137 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -22,10 +22,253 @@
*
*/
+/*
+ * Laptops with Intel GPUs which have panels that support controlling the
+ * backlight through DP AUX can actually use two different interfaces: Intel's
+ * proprietary DP AUX backlight interface, and the standard VESA backlight
+ * interface. Unfortunately, at the time of writing this, a lot of laptops will
+ * advertise support for the standard VESA backlight interface when they
+ * don't properly support it. However, on these systems the Intel backlight
+ * interface generally does work properly. Additionally, these systems will
+ * usually just indicate that they use PWM backlight controls in their VBIOS
+ * for some reason.
+ */
+
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
+#include "intel_panel.h"
+
+/* TODO:
+ * Implement HDR; right now we implement only the bare minimum to bring us back into SDR
+ * mode so we can make people's backlights work in the meantime.
+ */
+
+/*
+ * DP AUX registers for Intel's proprietary HDR backlight interface. We define
+ * them here since we'll likely be the only driver to ever use these.
+ */
+#define INTEL_EDP_HDR_TCON_CAP0 0x340
+
+#define INTEL_EDP_HDR_TCON_CAP1 0x341
+# define INTEL_EDP_HDR_TCON_2084_DECODE_CAP BIT(0)
+# define INTEL_EDP_HDR_TCON_2020_GAMUT_CAP BIT(1)
+# define INTEL_EDP_HDR_TCON_TONE_MAPPING_CAP BIT(2)
+# define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_CAP BIT(3)
+# define INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP BIT(4)
+# define INTEL_EDP_HDR_TCON_OPTIMIZATION_CAP BIT(5)
+# define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_CAP BIT(6)
+# define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_CONVERSION_CAP BIT(7)
+
+#define INTEL_EDP_HDR_TCON_CAP2 0x342
+# define INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP BIT(0)
+
+#define INTEL_EDP_HDR_TCON_CAP3 0x343
+
+#define INTEL_EDP_HDR_GETSET_CTRL_PARAMS 0x344
+# define INTEL_EDP_HDR_TCON_2084_DECODE_ENABLE BIT(0)
+# define INTEL_EDP_HDR_TCON_2020_GAMUT_ENABLE BIT(1)
+# define INTEL_EDP_HDR_TCON_TONE_MAPPING_ENABLE BIT(2) /* Pre-TGL+ */
+# define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_ENABLE BIT(3)
+# define INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE BIT(4)
+# define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_ENABLE BIT(5)
+/* Bit 6 is reserved */
+# define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_ENABLE BIT(7)
+
+#define INTEL_EDP_HDR_CONTENT_LUMINANCE 0x346 /* Pre-TGL+ */
+#define INTEL_EDP_HDR_PANEL_LUMINANCE_OVERRIDE 0x34A
+#define INTEL_EDP_SDR_LUMINANCE_LEVEL 0x352
+#define INTEL_EDP_BRIGHTNESS_NITS_LSB 0x354
+#define INTEL_EDP_BRIGHTNESS_NITS_MSB 0x355
+#define INTEL_EDP_BRIGHTNESS_DELAY_FRAMES 0x356
+#define INTEL_EDP_BRIGHTNESS_PER_FRAME_STEPS 0x357
+
+#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_0 0x358
+# define INTEL_EDP_TCON_USAGE_MASK GENMASK(3, 0)
+# define INTEL_EDP_TCON_USAGE_UNKNOWN 0x0
+# define INTEL_EDP_TCON_USAGE_DESKTOP 0x1
+# define INTEL_EDP_TCON_USAGE_FULL_SCREEN_MEDIA 0x2
+# define INTEL_EDP_TCON_USAGE_FULL_SCREEN_GAMING 0x3
+# define INTEL_EDP_TCON_POWER_MASK BIT(4)
+# define INTEL_EDP_TCON_POWER_DC (0 << 4)
+# define INTEL_EDP_TCON_POWER_AC (1 << 4)
+# define INTEL_EDP_TCON_OPTIMIZATION_STRENGTH_MASK GENMASK(7, 5)
+
+#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359
+
+/* Intel EDP backlight callbacks */
+static bool
+intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+ u8 tcon_cap[4];
+
+ ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap));
+ if (ret < 0)
+ return false;
+
+ if (!(tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP))
+ return false;
+
+ if (tcon_cap[0] >= 1) {
+ drm_dbg_kms(&i915->drm, "Detected Intel HDR backlight interface version %d\n",
+ tcon_cap[0]);
+ } else {
+ drm_dbg_kms(&i915->drm, "Detected unsupported HDR backlight interface version %d\n",
+ tcon_cap[0]);
+ return false;
+ }
+
+ panel->backlight.edp.intel.sdr_uses_aux =
+ tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP;
+
+ return true;
+}
+
+static u32
+intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ u8 tmp;
+ u8 buf[2] = { 0 };
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) < 0) {
+ drm_err(&i915->drm, "Failed to read current backlight mode from DPCD\n");
+ return 0;
+ }
+
+ if (!(tmp & INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE)) {
+ if (!panel->backlight.edp.intel.sdr_uses_aux) {
+ u32 pwm_level = panel->backlight.pwm_funcs->get(connector, pipe);
+
+ return intel_panel_backlight_level_from_pwm(connector, pwm_level);
+ }
+
+ /* Assume 100% brightness if backlight controls aren't enabled yet */
+ return panel->backlight.max;
+ }
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, sizeof(buf)) < 0) {
+ drm_err(&i915->drm, "Failed to read brightness from DPCD\n");
+ return 0;
+ }
+
+ return (buf[1] << 8 | buf[0]);
+}
+
+static void
+intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_device *dev = connector->base.dev;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ u8 buf[4] = { 0 };
+
+ buf[0] = level & 0xFF;
+ buf[1] = (level & 0xFF00) >> 8;
+
+ if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, 4) < 0)
+ drm_err(dev, "Failed to write brightness level to DPCD\n");
+}
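The brightness value is carried over AUX as a 16-bit quantity split across the NITS_LSB/NITS_MSB registers; below is a minimal sketch of the round trip between the set and get paths above, with an assumed level of 300:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t level = 300;	/* assumed brightness value */
		uint8_t buf[2];

		buf[0] = level & 0xff;			/* ..._BRIGHTNESS_NITS_LSB */
		buf[1] = (level & 0xff00) >> 8;		/* ..._BRIGHTNESS_NITS_MSB */

		/* The get path recombines the two bytes in the same order. */
		assert((uint32_t)(buf[1] << 8 | buf[0]) == level);
		return 0;
	}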
+
+static void
+intel_dp_aux_hdr_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ if (panel->backlight.edp.intel.sdr_uses_aux) {
+ intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
+ } else {
+ const u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level);
+
+ intel_panel_set_pwm_level(conn_state, pwm_level);
+ }
+}
+
+static void
+intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ int ret;
+ u8 old_ctrl, ctrl;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
+ if (ret < 0) {
+ drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
+ return;
+ }
+
+ ctrl = old_ctrl;
+ if (panel->backlight.edp.intel.sdr_uses_aux) {
+ ctrl |= INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
+ intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
+ } else {
+ u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level);
+
+ panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
+
+ ctrl &= ~INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
+ }
+
+ if (ctrl != old_ctrl)
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) < 0)
+ drm_err(&i915->drm, "Failed to configure DPCD brightness controls\n");
+}
+
+static void
+intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ /* Nothing to do for AUX based backlight controls */
+ if (panel->backlight.edp.intel.sdr_uses_aux)
+ return;
+
+ /* Note we want the actual pwm_level to be 0, regardless of pwm_min */
+ panel->backlight.pwm_funcs->disable(conn_state, intel_panel_invert_pwm_level(connector, 0));
+}
+
+static int
+intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+
+ if (panel->backlight.edp.intel.sdr_uses_aux) {
+ drm_dbg_kms(&i915->drm, "SDR backlight is controlled through DPCD\n");
+ } else {
+ drm_dbg_kms(&i915->drm, "SDR backlight is controlled through PWM\n");
+
+ ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+ if (ret < 0) {
+ drm_err(&i915->drm,
+ "Failed to setup SDR backlight controls through PWM: %d\n", ret);
+ return ret;
+ }
+ }
+
+ panel->backlight.max = 512;
+ panel->backlight.min = 0;
+ panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe);
+ panel->backlight.enabled = panel->backlight.level != 0;
-static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
+ return 0;
+}
+
+/* VESA backlight callbacks */
+static void set_vesa_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 reg_val = 0;
@@ -52,7 +295,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
}
}
-static bool intel_dp_aux_backlight_dpcd_mode(struct intel_connector *connector)
+static bool intel_dp_aux_vesa_backlight_dpcd_mode(struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -75,7 +318,7 @@ static bool intel_dp_aux_backlight_dpcd_mode(struct intel_connector *connector)
* Read the current backlight value from DPCD register(s) based
* on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported
*/
-static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
+static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -86,7 +329,7 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
* If we're not in DPCD control mode yet, the programmed brightness
* value is meaningless and we should assume max brightness
*/
- if (!intel_dp_aux_backlight_dpcd_mode(connector))
+ if (!intel_dp_aux_vesa_backlight_dpcd_mode(connector))
return connector->panel.backlight.max;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
@@ -107,7 +350,8 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
* 8-bit or 16 bit value (MSB and LSB)
*/
static void
-intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state,
+ u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
@@ -137,11 +381,11 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
* - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the
* EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h)
*/
-static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
+static bool intel_dp_aux_vesa_set_pwm_freq(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_dp *intel_dp = intel_attached_dp(connector);
- const u8 pn = connector->panel.backlight.pwmgen_bit_count;
+ const u8 pn = connector->panel.backlight.edp.vesa.pwmgen_bit_count;
int freq, fxp, f, fxp_actual, fxp_min, fxp_max;
freq = dev_priv->vbt.backlight.pwm_freq_hz;
@@ -173,14 +417,16 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
return true;
}
-static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void
+intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_panel *panel = &connector->panel;
u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
+ u8 pwmgen_bit_count = panel->backlight.edp.vesa.pwmgen_bit_count;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
@@ -201,7 +447,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT,
- panel->backlight.pwmgen_bit_count) < 0)
+ pwmgen_bit_count) < 0)
drm_dbg_kms(&i915->drm,
"Failed to write aux pwmgen bit count\n");
@@ -214,7 +460,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
}
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP)
- if (intel_dp_aux_set_pwm_freq(connector))
+ if (intel_dp_aux_vesa_set_pwm_freq(connector))
new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
if (new_dpcd_buf != dpcd_buf) {
@@ -225,18 +471,18 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
}
}
- intel_dp_aux_set_backlight(conn_state,
- connector->panel.backlight.level);
- set_aux_backlight_enable(intel_dp, true);
+ intel_dp_aux_vesa_set_backlight(conn_state, level);
+ set_vesa_backlight_enable(intel_dp, true);
}
-static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state *old_conn_state,
+ u32 level)
{
- set_aux_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)),
- false);
+ set_vesa_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)),
+ false);
}
-static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
+static u32 intel_dp_aux_vesa_calc_max_backlight(struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp = intel_attached_dp(connector);
@@ -309,40 +555,45 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
"Failed to write aux pwmgen bit count\n");
return max_backlight;
}
- panel->backlight.pwmgen_bit_count = pn;
+ panel->backlight.edp.vesa.pwmgen_bit_count = pn;
max_backlight = (1 << pn) - 1;
return max_backlight;
}
-static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
- enum pipe pipe)
+static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
{
struct intel_panel *panel = &connector->panel;
- panel->backlight.max = intel_dp_aux_calc_max_backlight(connector);
+ panel->backlight.max = intel_dp_aux_vesa_calc_max_backlight(connector);
if (!panel->backlight.max)
return -ENODEV;
panel->backlight.min = 0;
- panel->backlight.level = intel_dp_aux_get_backlight(connector);
- panel->backlight.enabled = intel_dp_aux_backlight_dpcd_mode(connector) &&
+ panel->backlight.level = intel_dp_aux_vesa_get_backlight(connector, pipe);
+ panel->backlight.enabled = intel_dp_aux_vesa_backlight_dpcd_mode(connector) &&
panel->backlight.level != 0;
return 0;
}
static bool
-intel_dp_aux_display_control_capable(struct intel_connector *connector)
+intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
/* Check the eDP Display control capabilities registers to determine if
- * the panel can support backlight control over the aux channel
+ * the panel can support backlight control over the aux channel.
+ *
+ * TODO: We currently only support AUX-only backlight configurations, not backlights which
+ * require a mix of PWM and AUX controls to work. In the meantime, these machines typically
+ * work just fine using normal PWM controls anyway.
*/
if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
+ (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)) {
drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
return true;
@@ -350,40 +601,89 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
return false;
}
-int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
+static const struct intel_panel_bl_funcs intel_dp_hdr_bl_funcs = {
+ .setup = intel_dp_aux_hdr_setup_backlight,
+ .enable = intel_dp_aux_hdr_enable_backlight,
+ .disable = intel_dp_aux_hdr_disable_backlight,
+ .set = intel_dp_aux_hdr_set_backlight,
+ .get = intel_dp_aux_hdr_get_backlight,
+};
+
+static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = {
+ .setup = intel_dp_aux_vesa_setup_backlight,
+ .enable = intel_dp_aux_vesa_enable_backlight,
+ .disable = intel_dp_aux_vesa_disable_backlight,
+ .set = intel_dp_aux_vesa_set_backlight,
+ .get = intel_dp_aux_vesa_get_backlight,
+};
+
+enum intel_dp_aux_backlight_modparam {
+ INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
+ INTEL_DP_AUX_BACKLIGHT_OFF = 0,
+ INTEL_DP_AUX_BACKLIGHT_ON = 1,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
+};
+
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
{
- struct intel_panel *panel = &intel_connector->panel;
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
+ struct drm_device *dev = connector->base.dev;
+ struct intel_panel *panel = &connector->panel;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ bool try_intel_interface = false, try_vesa_interface = false;
- if (i915->params.enable_dpcd_backlight == 0 ||
- !intel_dp_aux_display_control_capable(intel_connector))
+ /* Check the VBT and user's module parameters to figure out which
+ * interfaces to probe
+ */
+ switch (i915->params.enable_dpcd_backlight) {
+ case INTEL_DP_AUX_BACKLIGHT_OFF:
return -ENODEV;
+ case INTEL_DP_AUX_BACKLIGHT_AUTO:
+ switch (i915->vbt.backlight.type) {
+ case INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE:
+ try_vesa_interface = true;
+ break;
+ case INTEL_BACKLIGHT_DISPLAY_DDI:
+ try_intel_interface = true;
+ try_vesa_interface = true;
+ break;
+ default:
+ return -ENODEV;
+ }
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_ON:
+ if (i915->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
+ try_intel_interface = true;
+
+ try_vesa_interface = true;
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_FORCE_VESA:
+ try_vesa_interface = true;
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL:
+ try_intel_interface = true;
+ break;
+ }
/*
- * There are a lot of machines that don't advertise the backlight
- * control interface to use properly in their VBIOS, :\
+ * A lot of eDP panels in the wild will report supporting both the
+ * Intel proprietary backlight control interface, and the VESA
+ * backlight control interface. Many of these panels are liars though,
+ * and will only work with the Intel interface. So, always probe for
+ * that first.
*/
- if (i915->vbt.backlight.type !=
- INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
- i915->params.enable_dpcd_backlight != 1 &&
- !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
- DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
- drm_info(&i915->drm,
- "Panel advertises DPCD backlight support, but "
- "VBT disagrees. If your backlight controls "
- "don't work try booting with "
- "i915.enable_dpcd_backlight=1. If your machine "
- "needs this, please file a _new_ bug report on "
- "drm/i915, see " FDO_BUG_URL " for details.\n");
- return -ENODEV;
+ if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) {
+ drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n");
+ panel->backlight.funcs = &intel_dp_hdr_bl_funcs;
+ return 0;
}
- panel->backlight.setup = intel_dp_aux_setup_backlight;
- panel->backlight.enable = intel_dp_aux_enable_backlight;
- panel->backlight.disable = intel_dp_aux_disable_backlight;
- panel->backlight.set = intel_dp_aux_set_backlight;
- panel->backlight.get = intel_dp_aux_get_backlight;
+ if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) {
+ drm_dbg_kms(dev, "Using VESA eDP backlight controls\n");
+ panel->backlight.funcs = &intel_dp_vesa_bl_funcs;
+ return 0;
+ }
- return 0;
+ return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 03424d20e9f7..4dba5bb15af5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -16,6 +16,30 @@
#include "intel_dp.h"
#include "intel_hdcp.h"
+static unsigned int transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
+{
+ u32 stream_enc_mask;
+
+ switch (cpu_transcoder) {
+ case TRANSCODER_A:
+ stream_enc_mask = HDCP_STATUS_STREAM_A_ENC;
+ break;
+ case TRANSCODER_B:
+ stream_enc_mask = HDCP_STATUS_STREAM_B_ENC;
+ break;
+ case TRANSCODER_C:
+ stream_enc_mask = HDCP_STATUS_STREAM_C_ENC;
+ break;
+ case TRANSCODER_D:
+ stream_enc_mask = HDCP_STATUS_STREAM_D_ENC;
+ break;
+ default:
+ stream_enc_mask = 0;
+ }
+
+ return stream_enc_mask;
+}
+
static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
{
long ret;
@@ -561,7 +585,8 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
}
static
-int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port)
+int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
{
u8 rx_status;
int ret;
@@ -622,48 +647,143 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
};
static int
-intel_dp_mst_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
- enum transcoder cpu_transcoder,
- bool enable)
+intel_dp_mst_toggle_hdcp_stream_select(struct intel_connector *connector,
+ bool enable)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- if (!enable)
- usleep_range(6, 60); /* Bspec says >= 6us */
-
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base,
- cpu_transcoder, enable);
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base,
+ hdcp->stream_transcoder, enable,
+ TRANS_DDI_HDCP_SELECT);
if (ret)
- drm_dbg_kms(&i915->drm, "%s HDCP signalling failed (%d)\n",
- enable ? "Enable" : "Disable", ret);
+ drm_err(&i915->drm, "%s HDCP stream select failed (%d)\n",
+ enable ? "Enable" : "Disable", ret);
return ret;
}
-static
-bool intel_dp_mst_hdcp_check_link(struct intel_digital_port *dig_port,
- struct intel_connector *connector)
+static int
+intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
+ bool enable)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = dig_port->base.port;
+ enum transcoder cpu_transcoder = hdcp->stream_transcoder;
+ u32 stream_enc_status;
+ int ret;
+
+ ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
+ if (ret)
+ return ret;
+
+ stream_enc_status = transcoder_to_stream_enc_status(cpu_transcoder);
+ if (!stream_enc_status)
+ return -EINVAL;
+
+ /* Wait for encryption confirmation */
+ if (intel_de_wait_for_register(i915,
+ HDCP_STATUS(i915, cpu_transcoder, port),
+ stream_enc_status,
+ enable ? stream_enc_status : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
+ transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static bool intel_dp_mst_get_qses_status(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_dp *intel_dp = &dig_port->dp;
struct drm_dp_query_stream_enc_status_ack_reply reply;
+ struct intel_dp *intel_dp = &dig_port->dp;
int ret;
- if (!intel_dp_hdcp_check_link(dig_port, connector))
- return false;
-
ret = drm_dp_send_query_stream_enc_status(&intel_dp->mst_mgr,
connector->port, &reply);
if (ret) {
drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] failed QSES ret=%d\n",
- connector->base.base.id, connector->base.name, ret);
+ "[%s:%d] failed QSES ret=%d\n",
+ connector->base.name, connector->base.base.id, ret);
return false;
}
+ drm_dbg_kms(&i915->drm, "[%s:%d] QSES stream auth: %d stream enc: %d\n",
+ connector->base.name, connector->base.base.id,
+ reply.auth_completed, reply.encryption_enabled);
+
return reply.auth_completed && reply.encryption_enabled;
}
+static int
+intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
+ bool enable)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum transcoder cpu_transcoder = hdcp->stream_transcoder;
+ enum pipe pipe = (enum pipe)cpu_transcoder;
+ enum port port = dig_port->base.port;
+ int ret;
+
+ drm_WARN_ON(&i915->drm, enable &&
+ !!(intel_de_read(i915, HDCP2_AUTH_STREAM(i915, cpu_transcoder, port))
+ & AUTH_STREAM_TYPE) != data->streams[0].stream_type);
+
+ ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
+ if (ret)
+ return ret;
+
+ /* Wait for encryption confirmation */
+ if (intel_de_wait_for_register(i915,
+ HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe),
+ STREAM_ENCRYPTION_STATUS,
+ enable ? STREAM_ENCRYPTION_STATUS : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
+ transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Per DP v2.0 I.3.3, ignore the stream signature L' in the QSES reply msg.
+ * Per I.3.5, an MST source device may use a QSES msg to query downstream
+ * status for a particular stream.
+ */
+static
+int intel_dp_mst_hdcp2_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ /*
+ * We need to do the link check only for the connector involved with
+ * HDCP port authentication and encryption.
+ * We can re-use the hdcp->is_repeater flag to know whether the connector
+ * is involved with HDCP port authentication and encryption.
+ */
+ if (hdcp->is_repeater) {
+ ret = intel_dp_hdcp2_check_link(dig_port, connector);
+ if (ret)
+ return ret;
+ }
+
+ return intel_dp_mst_get_qses_status(dig_port, connector) ? 0 : -EINVAL;
+}
+
static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
.write_an_aksv = intel_dp_hdcp_write_an_aksv,
.read_bksv = intel_dp_hdcp_read_bksv,
@@ -673,10 +793,16 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
.read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
.read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
- .toggle_signalling = intel_dp_mst_hdcp_toggle_signalling,
- .check_link = intel_dp_mst_hdcp_check_link,
+ .toggle_signalling = intel_dp_hdcp_toggle_signalling,
+ .stream_encryption = intel_dp_mst_hdcp_stream_encryption,
+ .check_link = intel_dp_hdcp_check_link,
.hdcp_capable = intel_dp_hdcp_capable,
-
+ .write_2_2_msg = intel_dp_hdcp2_write_msg,
+ .read_2_2_msg = intel_dp_hdcp2_read_msg,
+ .config_stream_type = intel_dp_hdcp2_config_stream_type,
+ .stream_2_2_encryption = intel_dp_mst_hdcp2_stream_encryption,
+ .check_2_2_link = intel_dp_mst_hdcp2_check_link,
+ .hdcp_2_2_capable = intel_dp_hdcp2_capable,
.protocol = HDCP_PROTOCOL_DP,
};
@@ -693,10 +819,10 @@ int intel_dp_init_hdcp(struct intel_digital_port *dig_port,
return 0;
if (intel_connector->mst_port)
- return intel_hdcp_init(intel_connector, port,
+ return intel_hdcp_init(intel_connector, dig_port,
&intel_dp_mst_hdcp_shim);
else if (!intel_dp_is_edp(intel_dp))
- return intel_hdcp_init(intel_connector, port,
+ return intel_hdcp_init(intel_connector, dig_port,
&intel_dp_hdcp_shim);
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 91d3979902d0..892d7db7d94f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -34,18 +34,6 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
link_status[3], link_status[4], link_status[5]);
}
-static int intel_dp_lttpr_count(struct intel_dp *intel_dp)
-{
- int count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
-
- /*
- * Pretend no LTTPRs in case of LTTPR detection error, or
- * if too many (>8) LTTPRs are detected. This translates to link
- * training in transparent mode.
- */
- return count <= 0 ? 0 : count;
-}
-
static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
@@ -142,6 +130,17 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp)
return 0;
ret = intel_dp_read_lttpr_common_caps(intel_dp);
+ if (!ret)
+ return 0;
+
+ lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
+ /*
+ * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
+ * detected as this breaks link training at least on the Dell WD19TB
+ * dock.
+ */
+ if (lttpr_count == 0)
+ return 0;
/*
* See DP Standard v2.0 3.6.6.1. about the explicit disabling of
@@ -150,17 +149,12 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp)
*/
intel_dp_set_lttpr_transparent_mode(intel_dp, true);
- if (!ret)
- return 0;
-
- lttpr_count = intel_dp_lttpr_count(intel_dp);
-
/*
* In case of unsupported number of LTTPRs or failing to switch to
* non-transparent mode fall-back to transparent link training mode,
* still taking into account any LTTPR common lane- rate/count limits.
*/
- if (lttpr_count == 0)
+ if (lttpr_count < 0)
return 0;
if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
@@ -222,11 +216,11 @@ intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- int lttpr_count = intel_dp_lttpr_count(intel_dp);
+ int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
- drm_WARN_ON_ONCE(&i915->drm, lttpr_count == 0 && dp_phy != DP_PHY_DPRX);
+ drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);
- return lttpr_count == 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
+ return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}
static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
@@ -334,6 +328,27 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_set = intel_dp->train_set[0];
+ char phy_name[10];
+
+ drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n",
+ train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
+ train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "",
+ (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT,
+ train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
+ " (max)" : "",
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
+
+ if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
+ intel_dp->set_signal_levels(intel_dp, crtc_state);
+}
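For reference, here is a standalone sketch decoding a train_set byte the same way as the debug message above; the mask and shift values mirror the DP spec definitions from drm_dp_helper.h, and the sample byte is made up:

	#include <assert.h>
	#include <stdint.h>

	/* Field definitions as in the DP spec / drm_dp_helper.h. */
	#define DP_TRAIN_VOLTAGE_SWING_MASK		0x3
	#define DP_TRAIN_MAX_SWING_REACHED		(1 << 2)
	#define DP_TRAIN_PRE_EMPHASIS_MASK		0x18
	#define DP_TRAIN_PRE_EMPHASIS_SHIFT		3
	#define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED	(1 << 5)

	int main(void)
	{
		uint8_t train_set = 0x0e; /* example: swing 2 (max), pre-emphasis 1 */

		assert((train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == 2);
		assert(train_set & DP_TRAIN_MAX_SWING_REACHED);
		assert(((train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT) == 1);
		assert(!(train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED));
		return 0;
	}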
+
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
@@ -341,7 +356,7 @@ intel_dp_reset_link_train(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}
@@ -355,7 +370,7 @@ intel_dp_update_link_train(struct intel_dp *intel_dp,
DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
int ret;
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
intel_dp->train_set, crtc_state->lane_count);
@@ -413,7 +428,7 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
&rate_select, 1);
- link_config[0] = 0;
+ link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
link_config[1] = DP_SET_ANSI_8B10B;
drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
@@ -676,9 +691,9 @@ static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
* @intel_dp: DP struct
* @crtc_state: state for CRTC attached to the encoder
*
- * Stop the link training of the @intel_dp port, disabling the test pattern
- * symbol generation on the port and disabling the training pattern in
- * the sink's DPCD.
+ * Stop the link training of the @intel_dp port, disabling the training
+ * pattern in the sink's DPCD, and disabling the test pattern symbol
+ * generation on the port.
*
* What symbols are output on the port after this point is
* platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
@@ -692,10 +707,9 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
{
intel_dp->link_trained = true;
- intel_dp_program_link_training_pattern(intel_dp,
- crtc_state,
- DP_TRAINING_PATTERN_DISABLE);
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
+ intel_dp_program_link_training_pattern(intel_dp, crtc_state,
+ DP_TRAINING_PATTERN_DISABLE);
}
static bool
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 86905aa24db7..6a1f76bd8c75 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -17,6 +17,9 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE]);
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy);
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 27f04aed8764..b4621ed0127e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -53,8 +53,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
- DP_DPCD_QUIRK_CONSTANT_N);
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
crtc_state->lane_count = limits->max_lane_count;
@@ -69,7 +68,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
connector->port,
- crtc_state->pbn, 0);
+ crtc_state->pbn,
+ drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ crtc_state->lane_count));
if (slots == -EDEADLK)
return slots;
if (slots >= 0)
@@ -569,7 +570,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED)
intel_hdcp_enable(to_intel_connector(conn_state->connector),
- pipe_config->cpu_transcoder,
+ pipe_config,
(u8)conn_state->hdcp_content_type);
}
@@ -829,12 +830,11 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
-
- /* TODO: Figure out how to make HDCP work on GEN12+ */
- if (INTEL_GEN(dev_priv) < 12) {
+ if (INTEL_GEN(dev_priv) <= 12) {
ret = intel_dp_init_hdcp(dig_port, intel_connector);
if (ret)
- DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+ drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
+ connector->name, connector->base.id);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
new file mode 100644
index 000000000000..7ba7f315aaee
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -0,0 +1,1363 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+#include "intel_display_types.h"
+#include "intel_display.h"
+#include "intel_dpll.h"
+#include "intel_lvds.h"
+#include "intel_panel.h"
+
+struct intel_limit {
+ struct {
+ int min, max;
+ } dot, vco, n, m, m1, m2, p, p1;
+
+ struct {
+ int dot_limit;
+ int p2_slow, p2_fast;
+ } p2;
+};
+static const struct intel_limit intel_limits_i8xx_dac = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 2 },
+};
+
+static const struct intel_limit intel_limits_i8xx_dvo = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 4 },
+};
+
+static const struct intel_limit intel_limits_i8xx_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 1, .max = 6 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 14, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_i9xx_sdvo = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_i9xx_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 7, .max = 98 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_g4x_sdvo = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 1, .max = 3},
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 10,
+ .p2_fast = 10
+ },
+};
+
+static const struct intel_limit intel_limits_g4x_hdmi = {
+ .dot = { .min = 22000, .max = 400000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 16, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8},
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
+ .dot = { .min = 20000, .max = 115000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 14, .p2_fast = 14
+ },
+};
+
+static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
+ .dot = { .min = 80000, .max = 224000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 7, .p2_fast = 7
+ },
+};
+
+static const struct intel_limit pnv_limits_sdvo = {
+ .dot = { .min = 20000, .max = 400000},
+ .vco = { .min = 1700000, .max = 3500000 },
+ /* Pineview's Ncounter is a ring counter */
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ /* Pineview only has one combined m divider, which we treat as m2. */
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit pnv_limits_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1700000, .max = 3500000 },
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 7, .max = 112 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+/* Ironlake / Sandybridge
+ *
+ * We calculate clock using (register_value + 2) for N/M1/M2, so here
+ * the range value for them is (actual_value - 2).
+ */
+static const struct intel_limit ilk_limits_dac = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 5 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit ilk_limits_single_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 118 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+static const struct intel_limit ilk_limits_dual_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 56 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+};
+
+/* LVDS 100 MHz refclk limits. */
+static const struct intel_limit ilk_limits_single_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+static const struct intel_limit ilk_limits_dual_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_vlv = {
+ /*
+ * These are the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000 * 5, .max = 270000 * 5 },
+ .vco = { .min = 4000000, .max = 6000000 },
+ .n = { .min = 1, .max = 7 },
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
+};
+
+static const struct intel_limit intel_limits_chv = {
+ /*
+ * These are the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000 * 5, .max = 540000 * 5},
+ .vco = { .min = 4800000, .max = 6480000 },
+ .n = { .min = 1, .max = 1 },
+ .m1 = { .min = 2, .max = 2 },
+ .m2 = { .min = 24 << 22, .max = 175 << 22 },
+ .p1 = { .min = 2, .max = 4 },
+ .p2 = { .p2_slow = 1, .p2_fast = 14 },
+};
+
+static const struct intel_limit intel_limits_bxt = {
+ /* FIXME: find real dot limits */
+ .dot = { .min = 0, .max = INT_MAX },
+ .vco = { .min = 4800000, .max = 6700000 },
+ .n = { .min = 1, .max = 1 },
+ .m1 = { .min = 2, .max = 2 },
+ /* FIXME: find real m2 limits */
+ .m2 = { .min = 2 << 22, .max = 255 << 22 },
+ .p1 = { .min = 2, .max = 4 },
+ .p2 = { .p2_slow = 1, .p2_fast = 20 },
+};
+
+/*
+ * Platform specific helpers to calculate the port PLL loopback- (clock.m),
+ * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
+ * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
+ * The helpers' return value is the rate of the clock that is fed to the
+ * display engine's pipe which can be the above fast dot clock rate or a
+ * divided-down version of it.
+ */
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+int pnv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = clock->m2 + 2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
+}
+
+static u32 i9xx_dpll_compute_m(struct dpll *dpll)
+{
+ return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
+}
+
+int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = i9xx_dpll_compute_m(clock);
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
+}
+
+int vlv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot / 5;
+}
+
+int chv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
+ clock->n << 22);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot / 5;
+}
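
As a quick sanity check of the divider arithmetic above, the following standalone userspace sketch (illustrative divider values, not part of the patch; DIV_ROUND_CLOSEST simplified for non-negative operands) recomputes an i9xx-style dot clock the same way i9xx_calc_dpll_params() does:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	/* hypothetical dividers inside the intel_limits_i9xx_sdvo ranges */
	int refclk = 96000;			/* kHz */
	int n = 1, m1 = 11, m2 = 7, p1 = 2, p2 = 10;
	int m = 5 * (m1 + 2) + (m2 + 2);	/* 74 */
	int p = p1 * p2;			/* 20 */
	int vco = DIV_ROUND_CLOSEST(refclk * m, n + 2);	/* 2368000 kHz */
	int dot = DIV_ROUND_CLOSEST(vco, p);		/* 118400 kHz */

	printf("vco = %d kHz, dot = %d kHz\n", vco, dot);
	return 0;
}

The pnv/vlv/chv variants follow the same structure, differing only in how m is formed and in the final division by 5 on the fast-clock platforms.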
+
+/*
+ * Returns whether the given set of divisors are valid for a given refclk with
+ * the given connectors.
+ */
+static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
+ const struct intel_limit *limit,
+ const struct dpll *clock)
+{
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ return false;
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ return false;
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ return false;
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ return false;
+
+ if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
+ if (clock->m1 <= clock->m2)
+ return false;
+
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !IS_GEN9_LP(dev_priv)) {
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ return false;
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ return false;
+ }
+
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ return false;
+ /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
+ * connector, etc., rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ return false;
+
+ return true;
+}
+
+static int
+i9xx_select_p2_div(const struct intel_limit *limit,
+ const struct intel_crtc_state *crtc_state,
+ int target)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ /*
+ * For LVDS just rely on its current settings for dual-channel.
+ * We haven't figured out how to reliably set up different
+ * single/dual channel state, if we even can.
+ */
+ if (intel_is_dual_link_lvds(dev_priv))
+ return limit->p2.p2_fast;
+ else
+ return limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ return limit->p2.p2_slow;
+ else
+ return limit->p2.p2_fast;
+ }
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+i9xx_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct dpll clock;
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ if (clock.m2 >= clock.m1)
+ break;
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+ i9xx_calc_dpll_params(refclk, &clock);
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return (err != target);
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+pnv_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct dpll clock;
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+ pnv_calc_dpll_params(refclk, &clock);
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return (err != target);
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+g4x_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct dpll clock;
+ int max_n;
+ bool found = false;
+ /* approximately equals target * 0.00585 */
+ int err_most = (target >> 8) + (target >> 9);
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+ max_n = limit->n.max;
+ /* based on hardware requirement, prefer smaller n to precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ /* based on hardware requirement, prefer larger m1, m2 */
+ for (clock.m1 = limit->m1.max;
+ clock.m1 >= limit->m1.min; clock.m1--) {
+ for (clock.m2 = limit->m2.max;
+ clock.m2 >= limit->m2.min; clock.m2--) {
+ for (clock.p1 = limit->p1.max;
+ clock.p1 >= limit->p1.min; clock.p1--) {
+ int this_err;
+
+ i9xx_calc_dpll_params(refclk, &clock);
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err_most) {
+ *best_clock = clock;
+ err_most = this_err;
+ max_n = clock.n;
+ found = true;
+ }
+ }
+ }
+ }
+ }
+ return found;
+}
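
The err_most initializer above is the "~0.585%" tolerance mentioned in its comment; a tiny sketch with a hypothetical 148.5 MHz target makes the shift arithmetic concrete:

#include <stdio.h>

int main(void)
{
	int target = 148500;	/* kHz, hypothetical mode clock */
	int err_most = (target >> 8) + (target >> 9);	/* 1/256 + 1/512 */

	printf("err_most = %d kHz\n", err_most);	/* 580 + 290 = 870 */
	return 0;
}

In other words, any divisor set whose dot clock lands within roughly 870 kHz of that target is accepted as a candidate, with closer matches preferred.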
+
+/*
+ * Check whether the calculated PLL configuration is better than the best
+ * configuration and error found so far. The calculated error is returned
+ * in @error_ppm.
+ */
+static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+ const struct dpll *calculated_clock,
+ const struct dpll *best_clock,
+ unsigned int best_error_ppm,
+ unsigned int *error_ppm)
+{
+ /*
+ * For CHV ignore the error and consider only the P value.
+ * Prefer a bigger P value based on HW requirements.
+ */
+ if (IS_CHERRYVIEW(to_i915(dev))) {
+ *error_ppm = 0;
+
+ return calculated_clock->p > best_clock->p;
+ }
+
+ if (drm_WARN_ON_ONCE(dev, !target_freq))
+ return false;
+
+ *error_ppm = div_u64(1000000ULL *
+ abs(target_freq - calculated_clock->dot),
+ target_freq);
+ /*
+ * Prefer a better P value over a better (smaller) error if the error
+ * is small. Ensure this preference for future configurations too by
+ * setting the error to 0.
+ */
+ if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
+ *error_ppm = 0;
+
+ return true;
+ }
+
+ return *error_ppm + 10 < best_error_ppm;
+}
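
To make the ppm metric concrete, here is a minimal userspace sketch (hypothetical numbers) of the error computation vlv_PLL_is_optimal() performs; candidates under 100 ppm additionally let a larger P win outright:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long long target = 270000LL * 5;	/* fast-clock target, kHz */
	long long dot = 1349893;		/* hypothetical candidate rate */
	long long error_ppm = 1000000LL * llabs(target - dot) / target;

	printf("error = %lld ppm\n", error_ppm);	/* 79 ppm */
	return 0;
}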
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool
+vlv_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_device *dev = crtc->base.dev;
+ struct dpll clock;
+ unsigned int bestppm = 1000000;
+ /* min update 19.2 MHz */
+ int max_n = min(limit->n.max, refclk / 19200);
+ bool found = false;
+
+ target *= 5; /* fast clock */
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ /* based on hardware requirement, prefer smaller n to precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ clock.p = clock.p1 * clock.p2;
+ /* based on hardware requirement, prefer bigger m1,m2 values */
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+ unsigned int ppm;
+
+ clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
+ refclk * clock.m1);
+
+ vlv_calc_dpll_params(refclk, &clock);
+
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+
+ if (!vlv_PLL_is_optimal(dev, target,
+ &clock,
+ best_clock,
+ bestppm, &ppm))
+ continue;
+
+ *best_clock = clock;
+ bestppm = ppm;
+ found = true;
+ }
+ }
+ }
+ }
+
+ return found;
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool
+chv_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_device *dev = crtc->base.dev;
+ unsigned int best_error_ppm;
+ struct dpll clock;
+ u64 m2;
+ int found = false;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+ best_error_ppm = 1000000;
+
+ /*
+ * Based on the hardware doc, n is always set to 1 and m1 is always
+ * set to 2. If we ever need to support a 200 MHz refclk, this will
+ * have to be revisited because n may no longer be 1.
+ */
+ clock.n = 1;
+ clock.m1 = 2;
+ target *= 5; /* fast clock */
+
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast;
+ clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ unsigned int error_ppm;
+
+ clock.p = clock.p1 * clock.p2;
+
+ m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
+ refclk * clock.m1);
+
+ if (m2 > INT_MAX/clock.m1)
+ continue;
+
+ clock.m2 = m2;
+
+ chv_calc_dpll_params(refclk, &clock);
+
+ if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
+ continue;
+
+ if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+ best_error_ppm, &error_ppm))
+ continue;
+
+ *best_clock = clock;
+ best_error_ppm = error_ppm;
+ found = true;
+ }
+ }
+
+ return found;
+}
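
The << 22 shifts above come from the CHV PLL storing m2 as a 22-bit fixed-point fraction. A standalone sketch with illustrative values (270000 kHz port clock, 100 MHz refclk, p = 4) shows the round trip between the fractional m2 computed here and the clock recomputed by chv_calc_dpll_params():

#include <stdio.h>

int main(void)
{
	long long refclk = 100000;		/* kHz */
	long long target = 270000 * 5;		/* fast clock, kHz */
	long long n = 1, m1 = 2, p = 4;		/* e.g. p1 = 2, p2 = 2 */

	/* m2 as a 22-bit fixed-point value, as in chv_find_best_dpll() */
	long long m2 = ((target * p * n) << 22) / (refclk * m1);

	/* recompute the clocks the way chv_calc_dpll_params() does */
	long long vco = refclk * m1 * m2 / (n << 22);	/* 5400000 kHz */
	long long dot = vco / p;			/* 1350000 kHz */

	printf("m2 = %lld (%lld << 22), port clock = %lld kHz\n",
	       m2, m2 >> 22, dot / 5);
	return 0;
}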
+
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
+ struct dpll *best_clock)
+{
+ int refclk = 100000;
+ const struct intel_limit *limit = &intel_limits_bxt;
+
+ return chv_find_best_dpll(limit, crtc_state,
+ crtc_state->port_clock, refclk,
+ NULL, best_clock);
+}
+
+static u32 pnv_dpll_compute_fp(struct dpll *dpll)
+{
+ return (1 << dpll->n) << 16 | dpll->m2;
+}
+
+static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 fp, fp2 = 0;
+
+ if (IS_PINEVIEW(dev_priv)) {
+ fp = pnv_dpll_compute_fp(&crtc_state->dpll);
+ if (reduced_clock)
+ fp2 = pnv_dpll_compute_fp(reduced_clock);
+ } else {
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+ if (reduced_clock)
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
+ }
+
+ crtc_state->dpll_hw_state.fp0 = fp;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ reduced_clock) {
+ crtc_state->dpll_hw_state.fp1 = fp2;
+ } else {
+ crtc_state->dpll_hw_state.fp1 = fp;
+ }
+}
+
+static void i9xx_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll;
+ struct dpll *clock = &crtc_state->dpll;
+
+ i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ dpll |= (crtc_state->pixel_multiplier - 1)
+ << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ if (IS_PINEVIEW(dev_priv))
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+ else {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (IS_G4X(dev_priv) && reduced_clock)
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ }
+ switch (clock->p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+ if (INTEL_GEN(dev_priv) >= 4)
+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+ if (crtc_state->sdvo_tv_clock)
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ crtc_state->dpll_hw_state.dpll = dpll;
+
+ if (INTEL_GEN(dev_priv) >= 4) {
+ u32 dpll_md = (crtc_state->pixel_multiplier - 1)
+ << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ crtc_state->dpll_hw_state.dpll_md = dpll_md;
+ }
+}
+
+static void i8xx_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 dpll;
+ struct dpll *clock = &crtc_state->dpll;
+
+ i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ } else {
+ if (clock->p1 == 2)
+ dpll |= PLL_P1_DIVIDE_BY_TWO;
+ else
+ dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (clock->p2 == 4)
+ dpll |= PLL_P2_DIVIDE_BY_4;
+ }
+
+ /*
+ * Bspec:
+ * "[Almador Errata}: For the correct operation of the muxed DVO pins
+ * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
+ * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
+ * Enable) must be set to “1” in both the DPLL A Control Register
+ * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
+ *
+ * For simplicity we keep both bits always enabled in
+ * both DPLLs. The spec says we should disable the DVO 2X clock
+ * when not needed, but this seems to work fine in practice.
+ */
+ if (IS_I830(dev_priv) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
+ dpll |= DPLL_DVO_2X_MODE;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ crtc_state->dpll_hw_state.dpll = dpll;
+}
+
+static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
+ INTEL_GEN(dev_priv) >= 11) {
+ struct intel_encoder *encoder =
+ intel_get_crtc_new_encoder(state, crtc_state);
+
+ if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
+{
+ return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
+}
+
+static void ilk_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll, fp, fp2;
+ int factor;
+
+ /* Enable autotuning of the PLL clock (if permissible) */
+ factor = 21;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if ((intel_panel_use_ssc(dev_priv) &&
+ dev_priv->vbt.lvds_ssc_freq == 100000) ||
+ (HAS_PCH_IBX(dev_priv) &&
+ intel_is_dual_link_lvds(dev_priv)))
+ factor = 25;
+ } else if (crtc_state->sdvo_tv_clock) {
+ factor = 20;
+ }
+
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+
+ if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
+ fp |= FP_CB_TUNE;
+
+ if (reduced_clock) {
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
+
+ if (reduced_clock->m < factor * reduced_clock->n)
+ fp2 |= FP_CB_TUNE;
+ } else {
+ fp2 = fp;
+ }
+
+ dpll = 0;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+
+ dpll |= (crtc_state->pixel_multiplier - 1)
+ << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /*
+ * The high speed IO clock is only really required for
+ * SDVO/HDMI/DP, but we also enable it for CRT to make it
+ * possible to share the DPLL between CRT and HDMI. Enabling
+ * the clock needlessly does no real harm, except use up a
+ * bit of power potentially.
+ *
+ * We'll limit this to IVB with 3 pipes, since it has only two
+ * DPLLs and so DPLL sharing is the only way to get three pipes
+ * driving PCH ports at the same time. On SNB we could do this,
+ * and potentially avoid enabling the second DPLL, but it's not
+ * clear if it's a win or a loss power-wise. No point in doing
+ * this on ILK at all since it has a fixed DPLL<->pipe mapping.
+ */
+ if (INTEL_NUM_PIPES(dev_priv) == 3 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ /* also FPA1 */
+ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+
+ switch (crtc_state->dpll.p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ crtc_state->dpll_hw_state.dpll = dpll;
+ crtc_state->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp2;
+}
+
+static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+ const struct intel_limit *limit;
+ int refclk = 120000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (!crtc_state->has_pch_encoder)
+ return 0;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ dev_priv->vbt.lvds_ssc_freq);
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ }
+
+ if (intel_is_dual_link_lvds(dev_priv)) {
+ if (refclk == 100000)
+ limit = &ilk_limits_dual_lvds_100m;
+ else
+ limit = &ilk_limits_dual_lvds;
+ } else {
+ if (refclk == 100000)
+ limit = &ilk_limits_single_lvds_100m;
+ else
+ limit = &ilk_limits_single_lvds;
+ }
+ } else {
+ limit = &ilk_limits_dac;
+ }
+
+ if (!crtc_state->clock_set &&
+ !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ ilk_compute_dpll(crtc, crtc_state, NULL);
+
+ if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void vlv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (crtc->pipe != PIPE_A)
+ pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ /* DPLL not used with DSI, but still need the rest set up */
+ if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
+ pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
+ DPLL_EXT_BUFFER_ENABLE_VLV;
+
+ pipe_config->dpll_hw_state.dpll_md =
+ (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+void chv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (crtc->pipe != PIPE_A)
+ pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ /* DPLL not used with DSI, but still need the rest set up */
+ if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
+ pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+
+ pipe_config->dpll_hw_state.dpll_md =
+ (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+static int chv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ int refclk = 100000;
+ const struct intel_limit *limit = &intel_limits_chv;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (!crtc_state->clock_set &&
+ !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ chv_compute_dpll(crtc, crtc_state);
+
+ return 0;
+}
+
+static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ int refclk = 100000;
+ const struct intel_limit *limit = &intel_limits_vlv;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (!crtc_state->clock_set &&
+ !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ vlv_compute_dpll(crtc, crtc_state);
+
+ return 0;
+}
+
+static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_limit *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ if (intel_is_dual_link_lvds(dev_priv))
+ limit = &intel_limits_g4x_dual_channel_lvds;
+ else
+ limit = &intel_limits_g4x_single_channel_lvds;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+ limit = &intel_limits_g4x_hdmi;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+ limit = &intel_limits_g4x_sdvo;
+ } else {
+ /* The option is for other outputs */
+ limit = &intel_limits_i9xx_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ limit = &pnv_limits_lvds;
+ } else {
+ limit = &pnv_limits_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ limit = &intel_limits_i9xx_lvds;
+ } else {
+ limit = &intel_limits_i9xx_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
+ int refclk = 48000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ limit = &intel_limits_i8xx_lvds;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
+ limit = &intel_limits_i8xx_dvo;
+ } else {
+ limit = &intel_limits_i8xx_dac;
+ }
+
+ if (!crtc_state->clock_set &&
+ !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i8xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+void
+intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 9 || HAS_DDI(dev_priv))
+ dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ dev_priv->display.crtc_compute_clock = ilk_crtc_compute_clock;
+ else if (IS_CHERRYVIEW(dev_priv))
+ dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
+ else if (IS_VALLEYVIEW(dev_priv))
+ dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
+ else if (IS_G4X(dev_priv))
+ dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
+ else if (IS_PINEVIEW(dev_priv))
+ dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
+ else if (!IS_GEN(dev_priv, 2))
+ dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
+ else
+ dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
+}
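
For context (a generic illustration rather than driver code): the block above simply installs one of several same-signature implementations into a function pointer, so the rest of the display code can call the hook without knowing which platform was matched. The pattern, reduced to a standalone sketch with made-up names:

#include <stdio.h>

struct clock_hooks {
	int (*crtc_compute_clock)(int pipe);
};

static int fake_compute_clock(int pipe)
{
	return 100000 + pipe;	/* stand-in for a per-platform implementation */
}

int main(void)
{
	struct clock_hooks display = { .crtc_compute_clock = fake_compute_clock };

	/* callers go through the pointer, unaware of which hook was picked */
	printf("clock = %d\n", display.crtc_compute_clock(0));
	return 0;
}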
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
new file mode 100644
index 000000000000..caf4615092e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_DPLL_H_
+#define _INTEL_DPLL_H_
+
+struct dpll;
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+
+void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
+int vlv_calc_dpll_params(int refclk, struct dpll *clock);
+int pnv_calc_dpll_params(int refclk, struct dpll *clock);
+int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
+void vlv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+void chv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index b53c50372918..584c14c4cbd0 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -43,7 +43,7 @@
#define PANEL_PWM_MAX_VALUE 0xFF
-static u32 dcs_get_backlight(struct intel_connector *connector)
+static u32 dcs_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -77,7 +77,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
}
}
-static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
+static void dcs_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
struct mipi_dsi_device *dsi_device;
@@ -111,10 +111,9 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
}
static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
- struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
enum port port;
@@ -142,7 +141,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
&cabc, sizeof(cabc));
}
- dcs_set_backlight(conn_state, panel->backlight.level);
+ dcs_set_backlight(conn_state, level);
}
static int dcs_setup_backlight(struct intel_connector *connector,
@@ -156,6 +155,14 @@ static int dcs_setup_backlight(struct intel_connector *connector,
return 0;
}
+static const struct intel_panel_bl_funcs dcs_bl_funcs = {
+ .setup = dcs_setup_backlight,
+ .enable = dcs_enable_backlight,
+ .disable = dcs_disable_backlight,
+ .set = dcs_set_backlight,
+ .get = dcs_get_backlight,
+};
+
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
@@ -169,11 +176,7 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI))
return -EINVAL;
- panel->backlight.setup = dcs_setup_backlight;
- panel->backlight.enable = dcs_enable_backlight;
- panel->backlight.disable = dcs_disable_backlight;
- panel->backlight.set = dcs_set_backlight;
- panel->backlight.get = dcs_get_backlight;
+ panel->backlight.funcs = &dcs_bl_funcs;
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 237dbb1ba0ee..090cd76266c6 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -301,12 +301,8 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
- /*I915_WRITE(DVOB_SRCDIM,
- (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
- (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
intel_de_write(dev_priv, dvo_srcdim_reg,
(adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
- /*I915_WRITE(DVOB, dvo_val);*/
intel_de_write(dev_priv, dvo_reg, dvo_val);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index a5b072816a7b..5fd4fa4805ef 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -676,7 +676,7 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
}
static bool tiling_is_valid(struct drm_i915_private *dev_priv,
- uint64_t modifier)
+ u64 modifier)
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
@@ -742,6 +742,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fence_id = plane_state->vma->fence->id;
else
cache->fence_id = -1;
+
+ cache->psr2_active = crtc_state->has_psr2;
}
static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
@@ -914,6 +916,16 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
+ /*
+ * Tigerlake does not support FBC with PSR2.
+ * The recommendation is to keep this combination disabled.
+ * Bspec: 50422 HSD: 14010260002
+ */
+ if (fbc->state_cache.psr2_active && IS_TIGERLAKE(dev_priv)) {
+ fbc->no_fbc_reason = "not supported with PSR2";
+ return false;
+ }
+
return true;
}
@@ -1433,13 +1445,6 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (!HAS_FBC(dev_priv))
return 0;
- /*
- * Fbc is causing random underruns in CI execution on TGL platforms.
- * Disabling the same while the problem is being debugged and analyzed.
- */
- if (IS_TIGERLAKE(dev_priv))
- return 0;
-
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
return 1;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 842c04e63214..84f853f113b9 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -256,7 +256,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (vma->obj->stolen && !prealloc)
+ if (!i915_gem_object_is_shmem(vma->obj) && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -595,7 +595,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* full of whatever garbage was left in there.
*/
if (state == FBINFO_STATE_RUNNING &&
- intel_fb_obj(&ifbdev->fb->base)->stolen)
+ !i915_gem_object_is_shmem(intel_fb_obj(&ifbdev->fb->base)))
memset_io(info->screen_base, 0, info->screen_size);
drm_fb_helper_set_suspend(&ifbdev->helper, state);
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
new file mode 100644
index 000000000000..b2eb96ae10a2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -0,0 +1,683 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include "intel_atomic.h"
+#include "intel_display_types.h"
+#include "intel_fdi.h"
+
+/* units of 100MHz */
+static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
+ return crtc_state->fdi_lanes;
+
+ return 0;
+}
+
+static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_atomic_state *state = pipe_config->uapi.state;
+ struct intel_crtc *other_crtc;
+ struct intel_crtc_state *other_crtc_state;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "checking fdi config on pipe %c, lanes %i\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ if (pipe_config->fdi_lanes > 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return -EINVAL;
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ if (pipe_config->fdi_lanes > 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on haswell, required: %i lanes\n",
+ pipe_config->fdi_lanes);
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ }
+
+ if (INTEL_NUM_PIPES(dev_priv) == 2)
+ return 0;
+
+ /* Ivybridge 3 pipe is really complicated */
+ switch (pipe) {
+ case PIPE_A:
+ return 0;
+ case PIPE_B:
+ if (pipe_config->fdi_lanes <= 2)
+ return 0;
+
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
+ other_crtc_state =
+ intel_atomic_get_crtc_state(state, other_crtc);
+ if (IS_ERR(other_crtc_state))
+ return PTR_ERR(other_crtc_state);
+
+ if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid shared fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return -EINVAL;
+ }
+ return 0;
+ case PIPE_C:
+ if (pipe_config->fdi_lanes > 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on pipe %c: required %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return -EINVAL;
+ }
+
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
+ other_crtc_state =
+ intel_atomic_get_crtc_state(state, other_crtc);
+ if (IS_ERR(other_crtc_state))
+ return PTR_ERR(other_crtc_state);
+
+ if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "fdi link B uses too many lanes to enable link C\n");
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ BUG();
+ }
+}
+
+int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *i915 = to_i915(dev);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ int lane, link_bw, fdi_dotclock, ret;
+ bool needs_recompute = false;
+
+retry:
+ /* FDI is a binary signal running at ~2.7GHz, encoding
+ * each output octet as 10 bits. The actual frequency
+ * is stored as a divider into a 100MHz clock, and the
+ * mode pixel clock is stored in units of 1KHz.
+ * Hence the bw of each lane in terms of the mode signal
+ * is:
+ */
+ link_bw = intel_fdi_link_freq(i915, pipe_config);
+
+ fdi_dotclock = adjusted_mode->crtc_clock;
+
+ lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
+ pipe_config->pipe_bpp);
+
+ pipe_config->fdi_lanes = lane;
+
+ intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
+ link_bw, &pipe_config->fdi_m_n, false, false);
+
+ ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+ if (ret == -EDEADLK)
+ return ret;
+
+ if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
+ pipe_config->pipe_bpp -= 2*3;
+ drm_dbg_kms(&i915->drm,
+ "fdi link bw constraint, reducing pipe bpp to %i\n",
+ pipe_config->pipe_bpp);
+ needs_recompute = true;
+ pipe_config->bw_constrained = true;
+
+ goto retry;
+ }
+
+ if (needs_recompute)
+ return I915_DISPLAY_CONFIG_RETRY;
+
+ return ret;
+}
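
The helper called above (ilk_get_lanes_required()) is not part of this hunk, so its exact formula is not shown here; as an approximation consistent with the comment in the retry loop (each byte travels as a 10-bit symbol of the ~2.7 GHz serial stream, so link_bw in kHz times 8 is the per-lane payload rate in kbit/s), the lane count can be sketched as:

#include <stdio.h>

static int fdi_lanes_estimate(int dotclock_khz, int link_bw_khz, int bpp)
{
	long long bps = (long long)dotclock_khz * bpp;	/* payload bit rate */

	return (int)((bps + link_bw_khz * 8 - 1) / (link_bw_khz * 8));
}

int main(void)
{
	/* hypothetical: ~154 MHz dot clock, 24 bpp, 2.7 GHz FDI link */
	printf("lanes = %d\n", fdi_lanes_estimate(154000, 270000, 24));	/* 2 */
	return 0;
}

The real helper may also apply a small bandwidth margin, so treat this purely as an order-of-magnitude check.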
+
+void intel_fdi_normal_train(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* enable normal train */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (IS_IVYBRIDGE(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+ }
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+ /* wait one idle pattern time */
+ intel_de_posting_read(dev_priv, reg);
+ udelay(1000);
+
+ /* IVB wants error correction enabled */
+ if (IS_IVYBRIDGE(dev_priv))
+ intel_de_write(dev_priv, reg,
+ intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
+}
+
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ilk_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp, tries;
+
+ /* FDI needs bits from pipe first */
+ assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ intel_de_write(dev_priv, reg, temp);
+ intel_de_read(dev_priv, reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ /* Ironlake workaround, enable clock pointer after FDI enable */
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if ((temp & FDI_RX_BIT_LOCK)) {
+ drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
+ intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
+ break;
+ }
+ }
+ if (tries == 5)
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (tries == 5)
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+
+ drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
+}
+
+static const int snb_b_fdi_train_param[] = {
+ FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+ FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp, i, retry;
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(500);
+
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_BIT_LOCK) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done.\n");
+ break;
+ }
+ udelay(50);
+ }
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ if (IS_GEN(dev_priv, 6)) {
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ }
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(500);
+
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done.\n");
+ break;
+ }
+ udelay(50);
+ }
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+}
+
+/* Manual link training for Ivy Bridge A0 parts */
+static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp, i, j;
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
+ intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
+
+ /* Try each vswing and preemphasis setting twice before moving on */
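+ /* j/2 indexes snb_b_fdi_train_param, so each of the four settings gets two attempts (j = 0..7) */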
+ for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
+ /* disable first in case we need to retry */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+ temp &= ~FDI_TX_ENABLE;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_AUTO;
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp &= ~FDI_RX_ENABLE;
+ intel_de_write(dev_priv, reg, temp);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[j/2];
+ temp |= FDI_COMPOSITE_SYNC;
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ temp |= FDI_COMPOSITE_SYNC;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(1); /* should be 0.5us */
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK ||
+ (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done, level %i.\n",
+ i);
+ break;
+ }
+ udelay(1); /* should be 0.5us */
+ }
+ if (i == 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 fail on vswing %d\n", j / 2);
+ continue;
+ }
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(2); /* should be 1.5us */
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK ||
+ (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done, level %i.\n",
+ i);
+ goto train_done;
+ }
+ udelay(2); /* should be 1.5us */
+ }
+ if (i == 4)
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 fail on vswing %d\n", j / 2);
+ }
+
+train_done:
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+}
+
+void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ enum pipe pipe = intel_crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(200);
+
+ /* Switch from Rawclk to PCDclk */
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(200);
+
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+ }
+}
+
+void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = intel_crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
+
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
+
+ /* Wait for the clocks to turn off. */
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+}
+
+void ilk_fdi_disable(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
+ intel_de_posting_read(dev_priv, reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(0x7 << 16);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after bringing FDI down */
+ if (HAS_PCH_IBX(dev_priv))
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+}
+
+void
+intel_fdi_init_hook(struct drm_i915_private *dev_priv)
+{
+ if (IS_GEN(dev_priv, 5)) {
+ dev_priv->display.fdi_link_train = ilk_fdi_link_train;
+ } else if (IS_GEN(dev_priv, 6)) {
+ dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+ } else if (IS_IVYBRIDGE(dev_priv)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
new file mode 100644
index 000000000000..a9cd21663eb8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_FDI_H_
+#define _INTEL_FDI_H_
+
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+
+#define I915_DISPLAY_CONFIG_RETRY 1
+int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *pipe_config);
+void intel_fdi_normal_train(struct intel_crtc *crtc);
+void ilk_fdi_disable(struct intel_crtc *crtc);
+void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc);
+void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state);
+void intel_fdi_init_hook(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index d898b370d7a4..7b38eee9980f 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -225,8 +225,10 @@ static void frontbuffer_release(struct kref *ref)
struct i915_vma *vma;
spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj)
+ for_each_ggtt_vma(vma, obj) {
+ i915_vma_clear_scanout(vma);
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+ }
spin_unlock(&obj->vma.lock);
RCU_INIT_POINTER(obj->frontbuffer, NULL);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index b2a4bbcfdcd2..ae1371c36a32 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -15,6 +15,7 @@
#include <drm/drm_hdcp.h>
#include <drm/i915_component.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
@@ -23,9 +24,78 @@
#include "intel_connector.h"
#define KEY_LOAD_TRIES 5
-#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
#define HDCP2_LC_RETRY_CNT 3
+static int intel_conn_to_vcpi(struct intel_connector *connector)
+{
+ /* For HDMI and DP SST this is forced to 0x0. */
+ return connector->port ? connector->port->vcpi.vcpi : 0;
+}
+
+/*
+ * intel_hdcp_required_content_stream selects the highest common HDCP
+ * content_type for all streams in a DP MST topology, because the security
+ * firmware has no provision to mark a content_type per stream: it marks
+ * all available streams with the content_type provided at the time of port
+ * authentication. This may prevent userspace from using type1 content on an
+ * HDCP 2.2 capable sink when other sinks in the DP MST topology are not
+ * HDCP 2.2 capable. Though it is not compulsory, the security firmware
+ * should change its policy to mark different content_types for different
+ * streams.
+ */
+static int
+intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct intel_digital_port *conn_dig_port;
+ struct intel_connector *connector;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ bool enforce_type0 = false;
+ int k;
+
+ data->k = 0;
+
+ if (dig_port->hdcp_auth_status)
+ return 0;
+
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ if (connector->base.status == connector_status_disconnected)
+ continue;
+
+ if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
+ continue;
+
+ conn_dig_port = intel_attached_dig_port(connector);
+ if (conn_dig_port != dig_port)
+ continue;
+
+ if (!enforce_type0 && !intel_hdcp2_capable(connector))
+ enforce_type0 = true;
+
+ data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
+ data->k++;
+
+ /* if there is only one active stream */
+ if (dig_port->dp.active_mst_links <= 1)
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
+ return -EINVAL;
+
+ /*
+ * Apply a common protection level across all streams in the DP MST
+ * topology: use the highest content type supported by every stream.
+ */
+ for (k = 0; k < data->k; k++)
+ data->streams[k].stream_type =
+ enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
+
+ return 0;
+}
+
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
{
@@ -762,15 +832,22 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (intel_de_wait_for_set(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_ENC,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
- /*
- * XXX: If we have MST-connected devices, we need to enable encryption
- * on those as well.
- */
+ /* DP MST Auth Part 1 Step 2.a and Step 2.b */
+ if (shim->stream_encryption) {
+ ret = shim->stream_encryption(connector, true);
+ if (ret) {
+ drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
+ transcoder_name(hdcp->stream_transcoder));
+ }
if (repeater_present)
return intel_hdcp_auth_downstream(connector);
@@ -792,24 +869,29 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
connector->base.name, connector->base.base.id);
- /*
- * If there are other connectors on this port using HDCP, don't disable
- * it. Instead, toggle the HDCP signalling off on that particular
- * connector/pipe and exit.
- */
- if (dig_port->num_hdcp_streams > 0) {
- ret = hdcp->shim->toggle_signalling(dig_port,
- cpu_transcoder, false);
- if (ret)
- DRM_ERROR("Failed to disable HDCP signalling\n");
- return ret;
+ if (hdcp->shim->stream_encryption) {
+ ret = hdcp->shim->stream_encryption(connector, false);
+ if (ret) {
+ drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
+ transcoder_name(hdcp->stream_transcoder));
+ /*
+ * If there are other connectors on this port using HDCP, don't
+ * disable it yet; it is disabled only once stream encryption has
+ * been turned off for all connectors in the MST topology.
+ */
+ if (dig_port->num_hdcp_streams > 0)
+ return 0;
}
hdcp->hdcp_encrypted = false;
intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
if (intel_de_wait_for_clear(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
- ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(&dev_priv->drm,
"Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
@@ -1014,7 +1096,8 @@ static int
hdcp2_prepare_ake_init(struct intel_connector *connector,
struct hdcp2_ake_init *ake_data)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1043,7 +1126,8 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
struct hdcp2_ake_no_stored_km *ek_pub_km,
size_t *msg_sz)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1070,7 +1154,8 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
static int hdcp2_verify_hprime(struct intel_connector *connector,
struct hdcp2_ake_send_hprime *rx_hprime)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1095,7 +1180,8 @@ static int
hdcp2_store_pairing_info(struct intel_connector *connector,
struct hdcp2_ake_send_pairing_info *pairing_info)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1121,7 +1207,8 @@ static int
hdcp2_prepare_lc_init(struct intel_connector *connector,
struct hdcp2_lc_init *lc_init)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1147,7 +1234,8 @@ static int
hdcp2_verify_lprime(struct intel_connector *connector,
struct hdcp2_lc_send_lprime *rx_lprime)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1172,7 +1260,8 @@ hdcp2_verify_lprime(struct intel_connector *connector,
static int hdcp2_prepare_skey(struct intel_connector *connector,
struct hdcp2_ske_send_eks *ske_data)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1200,7 +1289,8 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
*rep_topology,
struct hdcp2_rep_send_ack *rep_send_ack)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1228,7 +1318,8 @@ static int
hdcp2_verify_mprime(struct intel_connector *connector,
struct hdcp2_rep_stream_ready *stream_ready)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1251,7 +1342,8 @@ hdcp2_verify_mprime(struct intel_connector *connector,
static int hdcp2_authenticate_port(struct intel_connector *connector)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1275,6 +1367,7 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
static int hdcp2_close_mei_session(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1288,7 +1381,7 @@ static int hdcp2_close_mei_session(struct intel_connector *connector)
}
ret = comp->ops->close_hdcp_session(comp->mei_dev,
- &connector->hdcp.port_data);
+ &dig_port->hdcp_port_data);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1448,13 +1541,14 @@ static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_rep_stream_manage stream_manage;
struct hdcp2_rep_stream_ready stream_ready;
} msgs;
const struct intel_hdcp_shim *shim = hdcp->shim;
- int ret;
+ int ret, streams_size_delta, i;
if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
return -ERANGE;
@@ -1463,16 +1557,18 @@ int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
- /* K no of streams is fixed as 1. Stored as big-endian. */
- msgs.stream_manage.k = cpu_to_be16(1);
+ msgs.stream_manage.k = cpu_to_be16(data->k);
- /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
- msgs.stream_manage.streams[0].stream_id = 0;
- msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
+ for (i = 0; i < data->k; i++) {
+ msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
+ msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
+ }
+ streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
+ sizeof(struct hdcp2_streamid_type);
/* Send it to Repeater */
ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
- sizeof(msgs.stream_manage));
+ sizeof(msgs.stream_manage) - streams_size_delta);
if (ret < 0)
goto out;
@@ -1481,8 +1577,8 @@ int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
if (ret < 0)
goto out;
- hdcp->port_data.seq_num_m = hdcp->seq_num_m;
- hdcp->port_data.streams[0].stream_type = hdcp->content_type;
+ data->seq_num_m = hdcp->seq_num_m;
+
ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
out:
@@ -1606,6 +1702,36 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
return ret;
}
+static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
+ enum port port = dig_port->base.port;
+ int ret = 0;
+
+ if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS)) {
+ drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
+ connector->base.name, connector->base.base.id);
+ return -EPERM;
+ }
+
+ if (hdcp->shim->stream_2_2_encryption) {
+ ret = hdcp->shim->stream_2_2_encryption(connector, true);
+ if (ret) {
+ drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
+ transcoder_name(hdcp->stream_transcoder));
+ }
+
+ return ret;
+}
+
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -1641,7 +1767,8 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
HDCP2_STATUS(dev_priv, cpu_transcoder,
port),
LINK_ENCRYPTION_STATUS,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ dig_port->hdcp_auth_status = true;
return ret;
}
@@ -1665,7 +1792,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
HDCP2_STATUS(dev_priv, cpu_transcoder,
port),
LINK_ENCRYPTION_STATUS,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");
@@ -1714,11 +1841,11 @@ hdcp2_propagate_stream_management_info(struct intel_connector *connector)
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_hdcp *hdcp = &connector->hdcp;
- int ret, i, tries = 3;
+ int ret = 0, i, tries = 3;
- for (i = 0; i < tries; i++) {
+ for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
ret = hdcp2_authenticate_sink(connector);
if (!ret) {
ret = hdcp2_propagate_stream_management_info(connector);
@@ -1728,8 +1855,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
ret);
break;
}
- hdcp->port_data.streams[0].stream_type =
- hdcp->content_type;
+
ret = hdcp2_authenticate_port(connector);
if (!ret)
break;
@@ -1744,7 +1870,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
}
- if (!ret) {
+ if (!ret && !dig_port->hdcp_auth_status) {
/*
* Ensuring the required 200mSec min time interval between
* Session Key Exchange and encryption.
@@ -1759,12 +1885,16 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
}
}
+ ret = hdcp2_enable_stream_encryption(connector);
+
return ret;
}
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
@@ -1772,6 +1902,16 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
connector->base.name, connector->base.base.id,
hdcp->content_type);
+ /* Set up the stream(s) that require encryption */
+ if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
+ data->k = 1;
+ data->streams[0].stream_type = hdcp->content_type;
+ } else {
+ ret = intel_hdcp_required_content_stream(dig_port);
+ if (ret)
+ return ret;
+ }
+
ret = hdcp2_authenticate_and_encrypt(connector);
if (ret) {
drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
@@ -1789,18 +1929,37 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
static int _intel_hdcp2_disable(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
connector->base.name, connector->base.base.id);
+ if (hdcp->shim->stream_2_2_encryption) {
+ ret = hdcp->shim->stream_2_2_encryption(connector, false);
+ if (ret) {
+ drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
+ transcoder_name(hdcp->stream_transcoder));
+
+ if (dig_port->num_hdcp_streams > 0)
+ return 0;
+ }
+
ret = hdcp2_disable_encryption(connector);
if (hdcp2_deauthenticate_port(connector) < 0)
drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
connector->hdcp.hdcp2_encrypted = false;
+ dig_port->hdcp_auth_status = false;
+ data->k = 0;
return ret;
}
@@ -1816,6 +1975,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
int ret = 0;
mutex_lock(&hdcp->mutex);
+ mutex_lock(&dig_port->hdcp_mutex);
cpu_transcoder = hdcp->cpu_transcoder;
/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
@@ -1837,7 +1997,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
- ret = hdcp->shim->check_2_2_link(dig_port);
+ ret = hdcp->shim->check_2_2_link(dig_port, connector);
if (ret == HDCP_LINK_PROTECTED) {
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
intel_hdcp_update_value(connector,
@@ -1893,6 +2053,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
}
out:
+ mutex_unlock(&dig_port->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
return ret;
}
@@ -1968,12 +2129,13 @@ static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
}
static int initialize_hdcp_port_data(struct intel_connector *connector,
- enum port port,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
- struct hdcp_port_data *data = &hdcp->port_data;
+ enum port port = dig_port->base.port;
if (INTEL_GEN(dev_priv) < 12)
data->fw_ddi = intel_get_mei_fw_ddi_index(port);
@@ -1994,16 +2156,15 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
data->protocol = (u8)shim->protocol;
- data->k = 1;
if (!data->streams)
- data->streams = kcalloc(data->k,
+ data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
sizeof(struct hdcp2_streamid_type),
GFP_KERNEL);
if (!data->streams) {
drm_err(&dev_priv->drm, "Out of Memory\n");
return -ENOMEM;
}
-
+ /* For SST */
data->streams[0].stream_id = 0;
data->streams[0].stream_type = hdcp->content_type;
@@ -2046,14 +2207,15 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
}
}
-static void intel_hdcp2_init(struct intel_connector *connector, enum port port,
+static void intel_hdcp2_init(struct intel_connector *connector,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- ret = initialize_hdcp_port_data(connector, port, shim);
+ ret = initialize_hdcp_port_data(connector, dig_port, shim);
if (ret) {
drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
return;
@@ -2063,7 +2225,7 @@ static void intel_hdcp2_init(struct intel_connector *connector, enum port port,
}
int intel_hdcp_init(struct intel_connector *connector,
- enum port port,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -2073,15 +2235,15 @@ int intel_hdcp_init(struct intel_connector *connector,
if (!shim)
return -EINVAL;
- if (is_hdcp2_supported(dev_priv) && !connector->mst_port)
- intel_hdcp2_init(connector, port, shim);
+ if (is_hdcp2_supported(dev_priv))
+ intel_hdcp2_init(connector, dig_port, shim);
ret =
drm_connector_attach_content_protection_property(&connector->base,
hdcp->hdcp2_supported);
if (ret) {
hdcp->hdcp2_supported = false;
- kfree(hdcp->port_data.streams);
+ kfree(dig_port->hdcp_port_data.streams);
return ret;
}
@@ -2095,7 +2257,7 @@ int intel_hdcp_init(struct intel_connector *connector,
}
int intel_hdcp_enable(struct intel_connector *connector,
- enum transcoder cpu_transcoder, u8 content_type)
+ const struct intel_crtc_state *pipe_config, u8 content_type)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -2106,15 +2268,28 @@ int intel_hdcp_enable(struct intel_connector *connector,
if (!hdcp->shim)
return -ENOENT;
+ if (!connector->encoder) {
+ drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
+ connector->base.name, connector->base.base.id);
+ return -ENODEV;
+ }
+
mutex_lock(&hdcp->mutex);
mutex_lock(&dig_port->hdcp_mutex);
drm_WARN_ON(&dev_priv->drm,
hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
hdcp->content_type = content_type;
- hdcp->cpu_transcoder = cpu_transcoder;
+
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
+ hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
+ hdcp->stream_transcoder = pipe_config->cpu_transcoder;
+ } else {
+ hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
+ hdcp->stream_transcoder = INVALID_TRANSCODER;
+ }
if (INTEL_GEN(dev_priv) >= 12)
- hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
+ dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
/*
* Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
@@ -2210,6 +2385,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
if (content_protection_type_changed) {
mutex_lock(&hdcp->mutex);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ drm_connector_get(&connector->base);
schedule_work(&hdcp->prop_work);
mutex_unlock(&hdcp->mutex);
}
@@ -2221,11 +2397,19 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
desired_and_not_enabled =
hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
mutex_unlock(&hdcp->mutex);
+ /*
+ * If HDCP is already ENABLED and the CP property is DESIRED, schedule
+ * prop_work to report the correct CP property to userspace.
+ */
+ if (!desired_and_not_enabled && !content_protection_type_changed) {
+ drm_connector_get(&connector->base);
+ schedule_work(&hdcp->prop_work);
+ }
}
if (desired_and_not_enabled || content_protection_type_changed)
intel_hdcp_enable(connector,
- crtc_state->cpu_transcoder,
+ crtc_state,
(u8)conn_state->hdcp_content_type);
}
@@ -2275,7 +2459,6 @@ void intel_hdcp_cleanup(struct intel_connector *connector)
drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
mutex_lock(&hdcp->mutex);
- kfree(hdcp->port_data.streams);
hdcp->shim = NULL;
mutex_unlock(&hdcp->mutex);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 1bbf5b67ed0a..8f53b0c7fe5c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+#define HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
+
struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
@@ -16,16 +18,18 @@ struct intel_connector;
struct intel_crtc_state;
struct intel_encoder;
struct intel_hdcp_shim;
+struct intel_digital_port;
enum port;
enum transcoder;
void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
-int intel_hdcp_init(struct intel_connector *connector, enum port port,
+int intel_hdcp_init(struct intel_connector *connector,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *hdcp_shim);
int intel_hdcp_enable(struct intel_connector *connector,
- enum transcoder cpu_transcoder, u8 content_type);
+ const struct intel_crtc_state *pipe_config, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 82674a8853c6..d5f4b40a8460 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -518,10 +518,10 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
-static void hsw_write_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- unsigned int type,
- const void *frame, ssize_t len)
+void hsw_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len)
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -555,10 +555,9 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, ctl_reg);
}
-static void hsw_read_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- unsigned int type,
- void *frame, ssize_t len)
+void hsw_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type, void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -1495,15 +1494,16 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
usleep_range(25, 50);
}
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
- false);
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
+ false, TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
drm_err(&dev_priv->drm,
"Disable HDCP signalling failed (%d)\n", ret);
return ret;
}
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
- true);
+
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
+ true, TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
drm_err(&dev_priv->drm,
"Enable HDCP signalling failed (%d)\n", ret);
@@ -1526,8 +1526,9 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
if (!enable)
usleep_range(6, 60); /* Bspec says >= 6us */
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
- enable);
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base,
+ cpu_transcoder, enable,
+ TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
enable ? "Enable" : "Disable", ret);
@@ -1732,7 +1733,8 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *dig_port,
}
static
-int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port)
+int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
{
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
int ret;
@@ -2950,21 +2952,12 @@ static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_digital_port *dig_port =
- hdmi_to_dig_port(intel_hdmi);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_attach_aspect_ratio_property(connector);
- /*
- * Attach Colorspace property for Non LSPCON based device
- * ToDo: This needs to be extended for LSPCON implementation
- * as well. Will be implemented separately.
- */
- if (!dig_port->lspcon.active)
- intel_attach_colorspace_property(connector);
-
+ intel_attach_hdmi_colorspace_property(connector);
drm_connector_attach_content_type_property(connector);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -3300,7 +3293,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
intel_hdmi->attached_connector = intel_connector;
if (is_hdcp_supported(dev_priv, port)) {
- int ret = intel_hdcp_init(intel_connector, port,
+ int ret = intel_hdcp_init(intel_connector, dig_port,
&intel_hdmi_hdcp_shim);
if (ret)
drm_dbg_kms(&dev_priv->drm,
@@ -3438,3 +3431,236 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
intel_hdmi_init_connector(dig_port, intel_connector);
}
+
+/*
+ * intel_hdmi_dsc_get_slice_height - get the dsc slice_height
+ * @vactive: Vactive of a display mode
+ *
+ * @return: appropriate dsc slice height for a given mode.
+ */
+int intel_hdmi_dsc_get_slice_height(int vactive)
+{
+ int slice_height;
+
+ /*
+ * Slice height determination: HDMI 2.1 Section 7.7.5.2
+ * Select the smallest slice height >= 96 that results in a valid PPS
+ * and requires the minimum number of padding lines for the final slice.
+ *
+ * Assumption : Vactive is even.
+ */
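+ /*
+ * Illustrative example: vactive = 2160 gives slice_height = 108, the
+ * first even value >= 96 that divides 2160 evenly (96..106 do not).
+ */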
+ for (slice_height = 96; slice_height <= vactive; slice_height += 2)
+ if (vactive % slice_height == 0)
+ return slice_height;
+
+ return 0;
+}
+
+/*
+ * intel_hdmi_dsc_get_num_slices - get no. of dsc slices based on dsc encoder
+ * and dsc decoder capabilities
+ *
+ * @crtc_state: intel crtc_state
+ * @src_max_slices: maximum slices supported by the DSC encoder
+ * @src_max_slice_width: maximum slice width supported by DSC encoder
+ * @hdmi_max_slices: maximum slices supported by sink DSC decoder
+ * @hdmi_throughput: maximum clock per slice (MHz) supported by HDMI sink
+ *
+ * @return: num of dsc slices that can be supported by the dsc encoder
+ * and decoder.
+ */
+int
+intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
+ int src_max_slices, int src_max_slice_width,
+ int hdmi_max_slices, int hdmi_throughput)
+{
+/* Pixel rates in KPixels/sec */
+#define HDMI_DSC_PEAK_PIXEL_RATE 2720000
+/*
+ * Rates at which the source and sink are required to process pixels in each
+ * slice can be at one of two levels: at least 340000 KHz or at least 400000 KHz.
+ */
+#define HDMI_DSC_MAX_ENC_THROUGHPUT_0 340000
+#define HDMI_DSC_MAX_ENC_THROUGHPUT_1 400000
+
+/* Spec limits the slice width to 2720 pixels */
+#define MAX_HDMI_SLICE_WIDTH 2720
+ int kslice_adjust;
+ int adjusted_clk_khz;
+ int min_slices;
+ int target_slices;
+ int max_throughput; /* max clock freq. in khz per slice */
+ int max_slice_width;
+ int slice_width;
+ int pixel_clock = crtc_state->hw.adjusted_mode.crtc_clock;
+
+ if (!hdmi_throughput)
+ return 0;
+
+ /*
+ * Slice width determination: HDMI 2.1 Section 7.7.5.1
+ * The kslice_adjust factor is 0.5 for the 4:2:0 and 4:2:2 formats,
+ * whereas for 4:4:4 it is 1.0. These factors are scaled by 10 here,
+ * and the adjusted clock value is divided by 10 afterwards.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
+ kslice_adjust = 10;
+ else
+ kslice_adjust = 5;
+
+ /*
+ * As per the spec, the rate at which the source and the sink process
+ * the pixels per slice is at one of two levels: at least 340 MHz or 400 MHz.
+ * This depends upon the pixel clock rate and the output format
+ * (kslice_adjust).
+ * If pixel clock * kslice_adjust is at most 2720 MHz, slices are processed
+ * at up to 340 MHz; otherwise they are processed at up to 400 MHz.
+ */
+
+ adjusted_clk_khz = DIV_ROUND_UP(kslice_adjust * pixel_clock, 10);
+
+ if (adjusted_clk_khz <= HDMI_DSC_PEAK_PIXEL_RATE)
+ max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_0;
+ else
+ max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_1;
+
+ /*
+ * Taking into account the sink's capability for maximum
+ * clock per slice (in MHz) as read from HF-VSDB.
+ */
+ max_throughput = min(max_throughput, hdmi_throughput * 1000);
+
+ min_slices = DIV_ROUND_UP(adjusted_clk_khz, max_throughput);
+ max_slice_width = min(MAX_HDMI_SLICE_WIDTH, src_max_slice_width);
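+ /*
+ * Worked example (illustrative): a 4:2:0 mode with a 594000 kHz pixel
+ * clock gives adjusted_clk_khz = 297000 <= 2720000, so max_throughput
+ * is 340000 kHz (assuming the sink's hdmi_throughput does not lower it)
+ * and min_slices = DIV_ROUND_UP(297000, 340000) = 1.
+ */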
+
+ /*
+ * Keep increasing the number of slices per line, starting from
+ * min_slices, until the resulting slice_width drops below
+ * max_slice_width. The selected slices/line must not exceed the
+ * maximum horizontal slice count that the combination of the PCON
+ * encoder and the HDMI decoder can support.
+ */
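+ /*
+ * E.g. with hdisplay = 3840, max_slice_width = 2720 and min_slices = 1
+ * (assuming both encoder and decoder support at least 2 slices): one
+ * slice would be 3840 pixels wide, so the loop settles on 2 slices of
+ * 1920 pixels each.
+ */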
+ slice_width = max_slice_width;
+
+ do {
+ if (min_slices <= 1 && src_max_slices >= 1 && hdmi_max_slices >= 1)
+ target_slices = 1;
+ else if (min_slices <= 2 && src_max_slices >= 2 && hdmi_max_slices >= 2)
+ target_slices = 2;
+ else if (min_slices <= 4 && src_max_slices >= 4 && hdmi_max_slices >= 4)
+ target_slices = 4;
+ else if (min_slices <= 8 && src_max_slices >= 8 && hdmi_max_slices >= 8)
+ target_slices = 8;
+ else if (min_slices <= 12 && src_max_slices >= 12 && hdmi_max_slices >= 12)
+ target_slices = 12;
+ else if (min_slices <= 16 && src_max_slices >= 16 && hdmi_max_slices >= 16)
+ target_slices = 16;
+ else
+ return 0;
+
+ slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, target_slices);
+ if (slice_width >= max_slice_width)
+ min_slices = target_slices + 1;
+ } while (slice_width >= max_slice_width);
+
+ return target_slices;
+}
+
+/*
+ * intel_hdmi_dsc_get_bpp - get the appropriate compressed bits_per_pixel based on
+ * source and sink capabilities.
+ *
+ * @src_fractional_bpp: fractional bpp supported by the source
+ * @slice_width: dsc slice width supported by the source and sink
+ * @num_slices: num of slices supported by the source and sink
+ * @output_format: video output format
+ * @hdmi_all_bpp: sink supports decoding of 1/16th bpp setting
+ * @hdmi_max_chunk_bytes: max bytes in a line of chunks supported by sink
+ *
+ * @return: compressed bits_per_pixel in units of 1/16 of a bit per pixel
+ */
+int
+intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width, int num_slices,
+ int output_format, bool hdmi_all_bpp,
+ int hdmi_max_chunk_bytes)
+{
+ int max_dsc_bpp, min_dsc_bpp;
+ int target_bytes;
+ bool bpp_found = false;
+ int bpp_decrement_x16;
+ int bpp_target;
+ int bpp_target_x16;
+
+ /*
+ * Get the min and max bpp as per Table 7.23 in the HDMI 2.1 spec.
+ * Start with the max bpp and keep decrementing in fractional bpp
+ * steps, if supported by the PCON DSC encoder.
+ *
+ * For each bpp, check whether the resulting number of chunk bytes
+ * can be supported by the HDMI sink.
+ */
+
+ /* Assuming bpc of 8 */
+ if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ min_dsc_bpp = 6;
+ max_dsc_bpp = 3 * 4; /* 3*bpc/2 */
+ } else if (output_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
+ output_format == INTEL_OUTPUT_FORMAT_RGB) {
+ min_dsc_bpp = 8;
+ max_dsc_bpp = 3 * 8; /* 3*bpc */
+ } else {
+ /* Assuming 4:2:2 encoding */
+ min_dsc_bpp = 7;
+ max_dsc_bpp = 2 * 8; /* 2*bpc */
+ }
+
+ /*
+ * Take into account whether DSC_all_bpp is supported by the HDMI 2.1
+ * sink. Section 7.7.34: the Source shall not enable Compressed Video
+ * Transport with bpp_target settings above 12 bpp unless
+ * DSC_all_bpp is set to 1.
+ */
+ if (!hdmi_all_bpp)
+ max_dsc_bpp = min(max_dsc_bpp, 12);
+
+ /*
+ * The sink has a limit on the number of compressed bytes per scanline,
+ * described by the max_chunk_bytes field in the HF-VSDB block of the EDID.
+ * The number of bytes depends on the target bits per pixel that the
+ * source configures. So we start with the max bpp and calculate
+ * the target_chunk_bytes. We keep decrementing the target bpp until
+ * target_chunk_bytes falls just below the sink's max_chunk_bytes, or
+ * until we reach min_dsc_bpp.
+ *
+ * The decrement step depends on the fractional bpp support of the PCON
+ * DSC encoder. For fractional bpp we track bpp_target in units of 1/16:
+ *
+ * bpp_target_x16 = bpp_target * 16
+ * so we decrement by {1, 2, 4, 8, 16} for fractional bpp steps of
+ * {1/16, 1/8, 1/4, 1/2, 1} respectively.
+ */
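+ /*
+ * E.g. (illustrative) for RGB output with 2 slices of 1920 pixels, no
+ * fractional bpp support and hdmi_all_bpp false: the search starts just
+ * below the 12 bpp cap at 11 bpp, which needs
+ * DIV_ROUND_UP(2 * 1920 * 11, 8) = 5280 chunk bytes per line, and stops
+ * at the first bpp whose chunk size fits within hdmi_max_chunk_bytes.
+ */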
+
+ bpp_target = max_dsc_bpp;
+
+ /* If the source does not support fractional bpp, decrement by 16 in bpp_target_x16 units (whole bpp) */
+ if (!src_fractional_bpp)
+ src_fractional_bpp = 1;
+ bpp_decrement_x16 = DIV_ROUND_UP(16, src_fractional_bpp);
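+ /*
+ * E.g. src_fractional_bpp = 16 (1/16 bpp granularity) gives a decrement
+ * of 1 in x16 units, while src_fractional_bpp = 0 or 1 (no fractional
+ * support) gives a decrement of 16, i.e. whole-bpp steps.
+ */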
+ bpp_target_x16 = (bpp_target * 16) - bpp_decrement_x16;
+
+ while (bpp_target_x16 > (min_dsc_bpp * 16)) {
+ int bpp;
+
+ bpp = DIV_ROUND_UP(bpp_target_x16, 16);
+ target_bytes = DIV_ROUND_UP((num_slices * slice_width * bpp), 8);
+ if (target_bytes <= hdmi_max_chunk_bytes) {
+ bpp_found = true;
+ break;
+ }
+ bpp_target_x16 -= bpp_decrement_x16;
+ }
+ if (bpp_found)
+ return bpp_target_x16;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 15eb0ccde76e..fa1a9b030850 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -50,5 +50,12 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, int bpc,
bool has_hdmi_sink, bool ycbcr420_output);
+int intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width,
+ int num_slices, int output_format, bool hdmi_all_bpp,
+ int hdmi_max_chunk_bytes);
+int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
+ int src_max_slices, int src_max_slice_width,
+ int hdmi_max_slices, int hdmi_throughput);
+int intel_hdmi_dsc_get_slice_height(int vactive);
#endif /* __INTEL_HDMI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index e37d45e531df..e4ff533e3a69 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -30,11 +30,15 @@
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_lspcon.h"
+#include "intel_hdmi.h"
/* LSPCON OUI Vendor ID(signatures) */
#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
#define LSPCON_VENDOR_MCA_OUI 0x0060AD
+#define DPCD_MCA_LSPCON_HDR_STATUS 0x70003
+#define DPCD_PARADE_LSPCON_HDR_STATUS 0x00511
+
/* AUX addresses to write MCA AVI IF */
#define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0
#define LSPCON_MCA_AVI_IF_CTRL 0x5DF
@@ -104,6 +108,35 @@ static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
return true;
}
+static u32 get_hdr_status_reg(struct intel_lspcon *lspcon)
+{
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ return DPCD_MCA_LSPCON_HDR_STATUS;
+ else
+ return DPCD_PARADE_LSPCON_HDR_STATUS;
+}
+
+void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
+{
+ struct intel_digital_port *dig_port =
+ container_of(lspcon, struct intel_digital_port, lspcon);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
+ u8 hdr_caps;
+ int ret;
+
+ ret = drm_dp_dpcd_read(&dp->aux, get_hdr_status_reg(lspcon),
+ &hdr_caps, 1);
+
+ if (ret < 0) {
+ drm_dbg_kms(dev, "HDR capability detection failed\n");
+ lspcon->hdr_supported = false;
+ } else if (hdr_caps & 0x1) {
+ drm_dbg_kms(dev, "LSPCON capable of HDR\n");
+ lspcon->hdr_supported = true;
+ }
+}
+
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
enum drm_lspcon_mode current_mode;
@@ -418,27 +451,32 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- bool ret;
+ bool ret = true;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
- /* LSPCON only needs AVI IF */
- if (type != HDMI_INFOFRAME_TYPE_AVI)
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
+ frame, len);
+ else
+ ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
+ frame, len);
+ break;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ drm_dbg_kms(encoder->base.dev, "Update HDR metadata for lspcon\n");
+ /* Reuse the legacy HSW infoframe write path for the GMP (HDR metadata) packet */
+ hsw_write_infoframe(encoder, crtc_state, type, frame, len);
+ break;
+ default:
return;
-
- if (lspcon->vendor == LSPCON_VENDOR_MCA)
- ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
- frame, len);
- else
- ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
- frame, len);
+ }
if (!ret) {
- DRM_ERROR("Failed to write AVI infoframes\n");
+ DRM_ERROR("Failed to write infoframes\n");
return;
}
-
- DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
}
void lspcon_read_infoframe(struct intel_encoder *encoder,
@@ -446,7 +484,10 @@ void lspcon_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- /* FIXME implement this */
+ /* FIXME implement for AVI Infoframe as well */
+ if (type == HDMI_PACKET_TYPE_GAMUT_METADATA)
+ hsw_read_infoframe(encoder, crtc_state, type,
+ frame, len);
}
void lspcon_set_infoframes(struct intel_encoder *encoder,
@@ -491,12 +532,26 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
else
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
- drm_hdmi_avi_infoframe_quant_range(&frame.avi,
- conn_state->connector,
- adjusted_mode,
- crtc_state->limited_color_range ?
- HDMI_QUANTIZATION_RANGE_LIMITED :
- HDMI_QUANTIZATION_RANGE_FULL);
+ /* Set the Colorspace as per the HDMI spec */
+ drm_hdmi_avi_infoframe_colorspace(&frame.avi, conn_state);
+
+ /* nonsense combination */
+ drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ conn_state->connector,
+ adjusted_mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ } else {
+ frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ frame.avi.ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ }
+
+ drm_hdmi_avi_infoframe_content_type(&frame.avi, conn_state);
ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
if (ret < 0) {
@@ -508,11 +563,64 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
buf, ret);
}
+static bool _lspcon_read_avi_infoframe_enabled_mca(struct drm_dp_aux *aux)
+{
+ int ret;
+ u32 val = 0;
+ u16 reg = LSPCON_MCA_AVI_IF_CTRL;
+
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ return val & LSPCON_MCA_AVI_IF_KICKOFF;
+}
+
+static bool _lspcon_read_avi_infoframe_enabled_parade(struct drm_dp_aux *aux)
+{
+ int ret;
+ u32 val = 0;
+ u16 reg = LSPCON_PARADE_AVI_IF_CTRL;
+
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ return val & LSPCON_PARADE_AVI_IF_KICKOFF;
+}
+
u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- /* FIXME actually read this from the hw */
- return 0;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ bool infoframes_enabled;
+ u32 val = 0;
+ u32 mask, tmp;
+
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ infoframes_enabled = _lspcon_read_avi_infoframe_enabled_mca(&intel_dp->aux);
+ else
+ infoframes_enabled = _lspcon_read_avi_infoframe_enabled_parade(&intel_dp->aux);
+
+ if (infoframes_enabled)
+ val |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ if (lspcon->hdr_supported) {
+ tmp = intel_de_read(dev_priv,
+ HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
+ mask = VIDEO_DIP_ENABLE_GMP_HSW;
+
+ if (tmp & mask)
+ val |= intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
+ }
+
+ return val;
}
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
@@ -520,7 +628,7 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON);
}
-static bool lspcon_init(struct intel_digital_port *dig_port)
+bool lspcon_init(struct intel_digital_port *dig_port)
{
struct intel_dp *dp = &dig_port->dp;
struct intel_lspcon *lspcon = &dig_port->lspcon;
@@ -550,6 +658,14 @@ static bool lspcon_init(struct intel_digital_port *dig_port)
return true;
}
+u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ return dig_port->infoframes_enabled(encoder, pipe_config);
+}
+
void lspcon_resume(struct intel_digital_port *dig_port)
{
struct intel_lspcon *lspcon = &dig_port->lspcon;
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h
index b03dcb7076d8..e19e10492b05 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.h
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.h
@@ -15,6 +15,8 @@ struct intel_digital_port;
struct intel_encoder;
struct intel_lspcon;
+bool lspcon_init(struct intel_digital_port *dig_port);
+void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon);
void lspcon_resume(struct intel_digital_port *dig_port);
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
void lspcon_write_infoframe(struct intel_encoder *encoder,
@@ -31,5 +33,15 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state);
u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
+u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
+void hsw_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len);
+void hsw_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len);
#endif /* __INTEL_LSPCON_H__ */
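A hedged sketch, not part of the patch: exporting hsw_write_infoframe() through this header presumably lets the LSPCON HDR path reuse the HSW/BDW+ video DIP registers. Here frame and len stand for an already-packed gamut metadata buffer and its length; both are assumptions for illustration.

static void example_write_gmp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *crtc_state,
			      const void *frame, ssize_t len)
{
	/* Write a packed HDR gamut metadata packet through the video DIP
	 * registers using the newly exported helper. */
	hsw_write_infoframe(encoder, crtc_state,
			    HDMI_PACKET_TYPE_GAMUT_METADATA,
			    frame, len);
}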
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 52b4f6193b4c..8edb9b2cdbeb 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -29,6 +29,7 @@
#include <drm/drm_fourcc.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
@@ -359,7 +360,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
intel_frontbuffer_flip_complete(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
i915_vma_put(vma);
}
@@ -860,7 +861,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
return 0;
out_unpin:
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
out_pin_section:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 9f23bac0d792..5fdf52643150 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -511,40 +511,79 @@ static u32 scale_hw_to_user(struct intel_connector *connector,
0, user_max);
}
-static u32 intel_panel_compute_brightness(struct intel_connector *connector,
- u32 val)
+u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 val)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
if (dev_priv->params.invert_brightness < 0)
return val;
if (dev_priv->params.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
- return panel->backlight.max - val + panel->backlight.min;
+ return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
}
return val;
}
-static u32 lpt_get_backlight(struct intel_connector *connector)
+void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val);
+ panel->backlight.pwm_funcs->set(conn_state, val);
+}
+
+u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
+ val = scale(val, panel->backlight.min, panel->backlight.max,
+ panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
+
+ return intel_panel_invert_pwm_level(connector, val);
+}
+
+u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
+ if (dev_priv->params.invert_brightness > 0 ||
+ (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS))
+ val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
+
+ return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
+ panel->backlight.min, panel->backlight.max);
+}
+
+static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
}
-static u32 pch_get_backlight(struct intel_connector *connector)
+static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
}
-static u32 i9xx_get_backlight(struct intel_connector *connector)
+static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
@@ -564,23 +603,17 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
return val;
}
-static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
+static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return 0;
return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
-static u32 vlv_get_backlight(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum pipe pipe = intel_connector_get_pipe(connector);
-
- return _vlv_get_backlight(dev_priv, pipe);
-}
-
-static u32 bxt_get_backlight(struct intel_connector *connector)
+static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
@@ -589,7 +622,7 @@ static u32 bxt_get_backlight(struct intel_connector *connector)
BXT_BLC_PWM_DUTY(panel->backlight.controller));
}
-static u32 pwm_get_backlight(struct intel_connector *connector)
+static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_panel *panel = &connector->panel;
struct pwm_state state;
@@ -624,12 +657,12 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
if (panel->backlight.combination_mode) {
u8 lbpc;
- lbpc = level * 0xfe / panel->backlight.max + 1;
+ lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
level /= lbpc;
pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
}
@@ -666,7 +699,7 @@ static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32
BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
}
-static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
@@ -681,10 +714,9 @@ intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", level);
+ drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level);
- level = intel_panel_compute_brightness(connector, level);
- panel->backlight.set(conn_state, level);
+ panel->backlight.funcs->set(conn_state, level);
}
/* set backlight brightness to level in range [0..max], assuming hw min is
@@ -726,13 +758,13 @@ void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state
mutex_unlock(&dev_priv->backlight_lock);
}
-static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, level);
/*
* Although we don't support or enable CPU PWM with LPT/SPT based
@@ -754,13 +786,13 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
-static void pch_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
@@ -769,44 +801,44 @@ static void pch_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
-static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
}
-static void i965_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev);
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
}
-static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
tmp & ~BLM_PWM_ENABLE);
}
-static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 tmp, val;
+ u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv,
BXT_BLC_PWM_CTL(panel->backlight.controller));
@@ -820,14 +852,14 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta
}
}
-static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv,
BXT_BLC_PWM_CTL(panel->backlight.controller));
@@ -835,7 +867,7 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta
tmp & ~BXT_BLC_PWM_ENABLE);
}
-static void pwm_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct intel_panel *panel = &connector->panel;
@@ -870,13 +902,13 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
panel->backlight.enabled = false;
- panel->backlight.disable(old_conn_state);
+ panel->backlight.funcs->disable(old_conn_state, 0);
mutex_unlock(&dev_priv->backlight_lock);
}
static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -906,7 +938,7 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
}
- pch_ctl2 = panel->backlight.max << 16;
+ pch_ctl2 = panel->backlight.pwm_level_max << 16;
intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
@@ -923,11 +955,11 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
pch_ctl1 | BLM_PCH_PWM_ENABLE);
/* This won't stick until the above enable. */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
}
static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -958,9 +990,9 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
/* This won't stick until the above enable. */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
- pch_ctl2 = panel->backlight.max << 16;
+ pch_ctl2 = panel->backlight.pwm_level_max << 16;
intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
@@ -974,7 +1006,7 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -987,7 +1019,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, BLC_PWM_CTL, 0);
}
- freq = panel->backlight.max;
+ freq = panel->backlight.pwm_level_max;
if (panel->backlight.combination_mode)
freq /= 0xff;
@@ -1001,7 +1033,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_posting_read(dev_priv, BLC_PWM_CTL);
/* XXX: combine this into above write? */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
/*
* Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
@@ -1013,7 +1045,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1028,7 +1060,7 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
}
- freq = panel->backlight.max;
+ freq = panel->backlight.pwm_level_max;
if (panel->backlight.combination_mode)
freq /= 0xff;
@@ -1044,11 +1076,11 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
}
static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1063,11 +1095,11 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
}
- ctl = panel->backlight.max << 16;
+ ctl = panel->backlight.pwm_level_max << 16;
intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
/* XXX: combine this into above write? */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
ctl2 = 0;
if (panel->backlight.active_low_pwm)
@@ -1079,7 +1111,7 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1116,9 +1148,9 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv,
BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.max);
+ panel->backlight.pwm_level_max);
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
pwm_ctl = 0;
if (panel->backlight.active_low_pwm)
@@ -1133,7 +1165,7 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1152,9 +1184,9 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv,
BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.max);
+ panel->backlight.pwm_level_max);
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
pwm_ctl = 0;
if (panel->backlight.active_low_pwm)
@@ -1168,14 +1200,12 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
pwm_ctl | BXT_BLC_PWM_ENABLE);
}
-static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_panel *panel = &connector->panel;
- int level = panel->backlight.level;
- level = intel_panel_compute_brightness(connector, level);
pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
panel->backlight.pwm_state.enabled = true;
pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
@@ -1198,7 +1228,7 @@ static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_s
panel->backlight.device->props.max_brightness);
}
- panel->backlight.enable(crtc_state, conn_state);
+ panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level);
panel->backlight.enabled = true;
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_UNBLANK;
@@ -1233,10 +1263,8 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
mutex_lock(&dev_priv->backlight_lock);
- if (panel->backlight.enabled) {
- val = panel->backlight.get(connector);
- val = intel_panel_compute_brightness(connector, val);
- }
+ if (panel->backlight.enabled)
+ val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
mutex_unlock(&dev_priv->backlight_lock);
@@ -1567,13 +1595,13 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv);
u32 pwm;
- if (!panel->backlight.hz_to_pwm) {
+ if (!panel->backlight.pwm_funcs->hz_to_pwm) {
drm_dbg_kms(&dev_priv->drm,
"backlight frequency conversion not supported\n");
return 0;
}
- pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
+ pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz);
if (!pwm) {
drm_dbg_kms(&dev_priv->drm,
"backlight frequency conversion failed\n");
@@ -1592,7 +1620,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
int min;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
/*
* XXX: If the vbt value is 255, it makes min equal to max, which leads
@@ -1609,7 +1637,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
}
/* vbt value is a coefficient in range [0..255] */
- return scale(min, 0, 255, 0, panel->backlight.max);
+ return scale(min, 0, 255, 0, panel->backlight.pwm_level_max);
}
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
@@ -1629,37 +1657,32 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
- panel->backlight.max = pch_ctl2 >> 16;
+ panel->backlight.pwm_level_max = pch_ctl2 >> 16;
cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
+ panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
- cpu_mode = panel->backlight.enabled && HAS_PCH_LPT(dev_priv) &&
+ cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(dev_priv) &&
!(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
(cpu_ctl2 & BLM_PWM_ENABLE);
- if (cpu_mode)
- val = pch_get_backlight(connector);
- else
- val = lpt_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
if (cpu_mode) {
+ val = pch_get_backlight(connector, unused);
+
drm_dbg_kms(&dev_priv->drm,
"CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
- lpt_set_backlight(connector->base.state, panel->backlight.level);
+ lpt_set_backlight(connector->base.state, val);
intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
@@ -1674,29 +1697,24 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2;
pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
- panel->backlight.max = pch_ctl2 >> 16;
+ panel->backlight.pwm_level_max = pch_ctl2 >> 16;
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
-
- val = pch_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
+ panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
(pch_ctl1 & BLM_PCH_PWM_ENABLE);
return 0;
@@ -1716,27 +1734,26 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
if (IS_PINEVIEW(dev_priv))
panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
- panel->backlight.max = ctl >> 17;
+ panel->backlight.pwm_level_max = ctl >> 17;
- if (!panel->backlight.max) {
- panel->backlight.max = get_backlight_max_vbt(connector);
- panel->backlight.max >>= 1;
+ if (!panel->backlight.pwm_level_max) {
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+ panel->backlight.pwm_level_max >>= 1;
}
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
if (panel->backlight.combination_mode)
- panel->backlight.max *= 0xff;
+ panel->backlight.pwm_level_max *= 0xff;
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- val = i9xx_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ val = i9xx_get_backlight(connector, unused);
+ val = intel_panel_invert_pwm_level(connector, val);
+ val = clamp(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
- panel->backlight.enabled = val != 0;
+ panel->backlight.pwm_enabled = val != 0;
return 0;
}
@@ -1745,32 +1762,27 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 ctl, ctl2, val;
+ u32 ctl, ctl2;
ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
- panel->backlight.max = ctl >> 16;
+ panel->backlight.pwm_level_max = ctl >> 16;
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
if (panel->backlight.combination_mode)
- panel->backlight.max *= 0xff;
-
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_max *= 0xff;
- val = i9xx_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
+ panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
return 0;
}
@@ -1779,7 +1791,7 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 ctl, ctl2, val;
+ u32 ctl, ctl2;
if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return -ENODEV;
@@ -1788,22 +1800,17 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
- panel->backlight.max = ctl >> 16;
+ panel->backlight.pwm_level_max = ctl >> 16;
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
-
- val = _vlv_get_backlight(dev_priv, pipe);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
+ panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
return 0;
}
@@ -1828,24 +1835,18 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
}
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
- panel->backlight.max =
- intel_de_read(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ panel->backlight.pwm_level_max =
+ intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
-
- val = bxt_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
return 0;
}
@@ -1855,7 +1856,7 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 pwm_ctl, val;
+ u32 pwm_ctl;
/*
* CNP has the BXT implementation of backlight, but with only one
@@ -1868,30 +1869,24 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
BXT_BLC_PWM_CTL(panel->backlight.controller));
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
- panel->backlight.max =
- intel_de_read(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ panel->backlight.pwm_level_max =
+ intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- val = bxt_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
-
- panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
return 0;
}
-static int pwm_setup_backlight(struct intel_connector *connector,
- enum pipe pipe)
+static int ext_pwm_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1915,8 +1910,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return -ENODEV;
}
- panel->backlight.max = 100; /* 100% */
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_max = 100; /* 100% */
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
if (pwm_is_enabled(panel->backlight.pwm)) {
/* PWM is already enabled, use existing settings */
@@ -1924,10 +1919,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state,
100);
- level = intel_panel_compute_brightness(connector, level);
- panel->backlight.level = clamp(level, panel->backlight.min,
- panel->backlight.max);
- panel->backlight.enabled = true;
+ level = intel_panel_invert_pwm_level(connector, level);
+ panel->backlight.pwm_enabled = true;
drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
@@ -1943,6 +1936,58 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return 0;
}
+static void intel_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->set(conn_state,
+ intel_panel_invert_pwm_level(connector, level));
+}
+
+static u32 intel_pwm_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return intel_panel_invert_pwm_level(connector,
+ panel->backlight.pwm_funcs->get(connector, pipe));
+}
+
+static void intel_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->enable(crtc_state, conn_state,
+ intel_panel_invert_pwm_level(connector, level));
+}
+
+static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->disable(conn_state,
+ intel_panel_invert_pwm_level(connector, level));
+}
+
+static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct intel_panel *panel = &connector->panel;
+ int ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+
+ if (ret < 0)
+ return ret;
+
+ panel->backlight.min = panel->backlight.pwm_level_min;
+ panel->backlight.max = panel->backlight.pwm_level_max;
+ panel->backlight.level = intel_pwm_get_backlight(connector, pipe);
+ panel->backlight.enabled = panel->backlight.pwm_enabled;
+
+ return 0;
+}
+
void intel_panel_update_backlight(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1981,12 +2026,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
}
/* ensure intel_panel has been initialized first */
- if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.setup))
+ if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs))
return -ENODEV;
/* set level and max in panel struct */
mutex_lock(&dev_priv->backlight_lock);
- ret = panel->backlight.setup(intel_connector, pipe);
+ ret = panel->backlight.funcs->setup(intel_connector, pipe);
mutex_unlock(&dev_priv->backlight_lock);
if (ret) {
@@ -2016,6 +2061,94 @@ static void intel_panel_destroy_backlight(struct intel_panel *panel)
panel->backlight.present = false;
}
+static const struct intel_panel_bl_funcs bxt_pwm_funcs = {
+ .setup = bxt_setup_backlight,
+ .enable = bxt_enable_backlight,
+ .disable = bxt_disable_backlight,
+ .set = bxt_set_backlight,
+ .get = bxt_get_backlight,
+ .hz_to_pwm = bxt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs cnp_pwm_funcs = {
+ .setup = cnp_setup_backlight,
+ .enable = cnp_enable_backlight,
+ .disable = cnp_disable_backlight,
+ .set = bxt_set_backlight,
+ .get = bxt_get_backlight,
+ .hz_to_pwm = cnp_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs lpt_pwm_funcs = {
+ .setup = lpt_setup_backlight,
+ .enable = lpt_enable_backlight,
+ .disable = lpt_disable_backlight,
+ .set = lpt_set_backlight,
+ .get = lpt_get_backlight,
+ .hz_to_pwm = lpt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs spt_pwm_funcs = {
+ .setup = lpt_setup_backlight,
+ .enable = lpt_enable_backlight,
+ .disable = lpt_disable_backlight,
+ .set = lpt_set_backlight,
+ .get = lpt_get_backlight,
+ .hz_to_pwm = spt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs pch_pwm_funcs = {
+ .setup = pch_setup_backlight,
+ .enable = pch_enable_backlight,
+ .disable = pch_disable_backlight,
+ .set = pch_set_backlight,
+ .get = pch_get_backlight,
+ .hz_to_pwm = pch_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs ext_pwm_funcs = {
+ .setup = ext_pwm_setup_backlight,
+ .enable = ext_pwm_enable_backlight,
+ .disable = ext_pwm_disable_backlight,
+ .set = ext_pwm_set_backlight,
+ .get = ext_pwm_get_backlight,
+};
+
+static const struct intel_panel_bl_funcs vlv_pwm_funcs = {
+ .setup = vlv_setup_backlight,
+ .enable = vlv_enable_backlight,
+ .disable = vlv_disable_backlight,
+ .set = vlv_set_backlight,
+ .get = vlv_get_backlight,
+ .hz_to_pwm = vlv_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i965_pwm_funcs = {
+ .setup = i965_setup_backlight,
+ .enable = i965_enable_backlight,
+ .disable = i965_disable_backlight,
+ .set = i9xx_set_backlight,
+ .get = i9xx_get_backlight,
+ .hz_to_pwm = i965_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i9xx_pwm_funcs = {
+ .setup = i9xx_setup_backlight,
+ .enable = i9xx_enable_backlight,
+ .disable = i9xx_disable_backlight,
+ .set = i9xx_set_backlight,
+ .get = i9xx_get_backlight,
+ .hz_to_pwm = i9xx_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs pwm_bl_funcs = {
+ .setup = intel_pwm_setup_backlight,
+ .enable = intel_pwm_enable_backlight,
+ .disable = intel_pwm_disable_backlight,
+ .set = intel_pwm_set_backlight,
+ .get = intel_pwm_get_backlight,
+};
+
/* Set up chip specific backlight functions */
static void
intel_panel_init_backlight_funcs(struct intel_panel *panel)
@@ -2024,75 +2157,39 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
container_of(panel, struct intel_connector, panel);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
- intel_dp_aux_init_backlight_funcs(connector) == 0)
- return;
-
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
intel_dsi_dcs_init_backlight_funcs(connector) == 0)
return;
if (IS_GEN9_LP(dev_priv)) {
- panel->backlight.setup = bxt_setup_backlight;
- panel->backlight.enable = bxt_enable_backlight;
- panel->backlight.disable = bxt_disable_backlight;
- panel->backlight.set = bxt_set_backlight;
- panel->backlight.get = bxt_get_backlight;
- panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
+ panel->backlight.pwm_funcs = &bxt_pwm_funcs;
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
- panel->backlight.setup = cnp_setup_backlight;
- panel->backlight.enable = cnp_enable_backlight;
- panel->backlight.disable = cnp_disable_backlight;
- panel->backlight.set = bxt_set_backlight;
- panel->backlight.get = bxt_get_backlight;
- panel->backlight.hz_to_pwm = cnp_hz_to_pwm;
+ panel->backlight.pwm_funcs = &cnp_pwm_funcs;
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
- panel->backlight.setup = lpt_setup_backlight;
- panel->backlight.enable = lpt_enable_backlight;
- panel->backlight.disable = lpt_disable_backlight;
- panel->backlight.set = lpt_set_backlight;
- panel->backlight.get = lpt_get_backlight;
if (HAS_PCH_LPT(dev_priv))
- panel->backlight.hz_to_pwm = lpt_hz_to_pwm;
+ panel->backlight.pwm_funcs = &lpt_pwm_funcs;
else
- panel->backlight.hz_to_pwm = spt_hz_to_pwm;
+ panel->backlight.pwm_funcs = &spt_pwm_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- panel->backlight.setup = pch_setup_backlight;
- panel->backlight.enable = pch_enable_backlight;
- panel->backlight.disable = pch_disable_backlight;
- panel->backlight.set = pch_set_backlight;
- panel->backlight.get = pch_get_backlight;
- panel->backlight.hz_to_pwm = pch_hz_to_pwm;
+ panel->backlight.pwm_funcs = &pch_pwm_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
- panel->backlight.setup = pwm_setup_backlight;
- panel->backlight.enable = pwm_enable_backlight;
- panel->backlight.disable = pwm_disable_backlight;
- panel->backlight.set = pwm_set_backlight;
- panel->backlight.get = pwm_get_backlight;
+ panel->backlight.pwm_funcs = &ext_pwm_funcs;
} else {
- panel->backlight.setup = vlv_setup_backlight;
- panel->backlight.enable = vlv_enable_backlight;
- panel->backlight.disable = vlv_disable_backlight;
- panel->backlight.set = vlv_set_backlight;
- panel->backlight.get = vlv_get_backlight;
- panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
+ panel->backlight.pwm_funcs = &vlv_pwm_funcs;
}
} else if (IS_GEN(dev_priv, 4)) {
- panel->backlight.setup = i965_setup_backlight;
- panel->backlight.enable = i965_enable_backlight;
- panel->backlight.disable = i965_disable_backlight;
- panel->backlight.set = i9xx_set_backlight;
- panel->backlight.get = i9xx_get_backlight;
- panel->backlight.hz_to_pwm = i965_hz_to_pwm;
+ panel->backlight.pwm_funcs = &i965_pwm_funcs;
} else {
- panel->backlight.setup = i9xx_setup_backlight;
- panel->backlight.enable = i9xx_enable_backlight;
- panel->backlight.disable = i9xx_disable_backlight;
- panel->backlight.set = i9xx_set_backlight;
- panel->backlight.get = i9xx_get_backlight;
- panel->backlight.hz_to_pwm = i9xx_hz_to_pwm;
+ panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
}
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
+ intel_dp_aux_init_backlight_funcs(connector) == 0)
+ return;
+
+ /* We're using a standard PWM backlight interface */
+ panel->backlight.funcs = &pwm_bl_funcs;
}
enum drm_connector_status
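A minimal sketch, not part of the patch, of how the two new vtable layers compose on the plain PWM path that intel_panel_init_backlight_funcs() now sets up; the connector state is assumed to be valid and fully initialized.

static void example_backlight_write(const struct drm_connector_state *conn_state, u32 level)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct intel_panel *panel = &connector->panel;

	/* Generic layer: funcs == &pwm_bl_funcs unless a DPCD aux or DSI DCS
	 * backend claimed the connector first. */
	panel->backlight.funcs->set(conn_state, level);

	/* For pwm_bl_funcs this lands in intel_pwm_set_backlight(), which
	 * applies intel_panel_invert_pwm_level() and forwards to the platform
	 * hook (bxt_set_backlight(), lpt_set_backlight(), ...) via
	 * panel->backlight.pwm_funcs->set(). */
}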
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index 5b813fe90557..1d340f77bffc 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -49,6 +49,10 @@ struct drm_display_mode *
intel_panel_edid_fixed_mode(struct intel_connector *connector);
struct drm_display_mode *
intel_panel_vbt_fixed_mode(struct intel_connector *connector);
+void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 level);
+u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 level);
+u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 level);
+u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val);
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
int intel_backlight_device_register(struct intel_connector *connector);
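For illustration only (not part of the patch; the connector and the numeric ranges are assumed), the two conversion helpers declared above map a user-facing brightness level onto the hardware PWM range and back, with the invert-brightness quirk applied only on the PWM side:

static void example_level_roundtrip(struct intel_connector *connector)
{
	u32 level = 512;	/* e.g. with backlight.min == 0, backlight.max == 1024 */
	u32 pwm, back;

	/* Scale into [pwm_level_min, pwm_level_max], inverting the value if
	 * the invert-brightness quirk or module parameter applies. */
	pwm = intel_panel_backlight_level_to_pwm(connector, level);

	/* Undo the inversion and rescale; back == level up to rounding. */
	back = intel_panel_backlight_level_from_pwm(connector, pwm);
	(void)back;
}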
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
new file mode 100644
index 000000000000..c4867a8020a5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -0,0 +1,1406 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_pps.h"
+
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+static void pps_init_delays(struct intel_dp *intel_dp);
+static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
+
+intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ /*
+ * See intel_pps_reset_all() for why we need a power domain reference here.
+ */
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
+ mutex_lock(&dev_priv->pps_mutex);
+
+ return wakeref;
+}
+
+intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
+ intel_wakeref_t wakeref)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ mutex_unlock(&dev_priv->pps_mutex);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+ return 0;
+}
+
+static void
+vlv_power_sequencer_kick(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum pipe pipe = intel_dp->pps.pps_pipe;
+ bool pll_enabled, release_cl_override = false;
+ enum dpio_phy phy = DPIO_PHY(pipe);
+ enum dpio_channel ch = vlv_pipe_to_channel(pipe);
+ u32 DP;
+
+ if (drm_WARN(&dev_priv->drm,
+ intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
+ "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
+ DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ DP |= DP_PORT_WIDTH(1);
+ DP |= DP_LINK_TRAIN_PAT_1;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ DP |= DP_PIPE_SEL_CHV(pipe);
+ else
+ DP |= DP_PIPE_SEL(pipe);
+
+ pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
+
+ /*
+ * The DPLL for the pipe must be enabled for this to work.
+ * So enable it temporarily if it's not already enabled.
+ */
+ if (!pll_enabled) {
+ release_cl_override = IS_CHERRYVIEW(dev_priv) &&
+ !chv_phy_powergate_ch(dev_priv, phy, ch, true);
+
+ if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
+ drm_err(&dev_priv->drm,
+ "Failed to force on pll for pipe %c!\n",
+ pipe_name(pipe));
+ return;
+ }
+ }
+
+ /*
+ * Similar magic as in intel_dp_enable_port().
+ * We _must_ do this port enable + disable trick
+ * to make this power sequencer lock onto the port.
+ * Otherwise even the VDD force bit won't work.
+ */
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ if (!pll_enabled) {
+ vlv_force_pll_off(dev_priv, pipe);
+
+ if (release_cl_override)
+ chv_phy_powergate_ch(dev_priv, phy, ch, false);
+ }
+}
+
+static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
+
+ * We don't have a power sequencer currently.
+ * We don't have power sequencer currently.
+ * Pick one that's not used by other ports.
+ */
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (encoder->type == INTEL_OUTPUT_EDP) {
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps.active_pipe != INVALID_PIPE &&
+ intel_dp->pps.active_pipe !=
+ intel_dp->pps.pps_pipe);
+
+ if (intel_dp->pps.pps_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->pps.pps_pipe);
+ } else {
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps.pps_pipe != INVALID_PIPE);
+
+ if (intel_dp->pps.active_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->pps.active_pipe);
+ }
+ }
+
+ if (pipes == 0)
+ return INVALID_PIPE;
+
+ return ffs(pipes) - 1;
+}
+
+static enum pipe
+vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum pipe pipe;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* We should never land here with regular DP ports */
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
+ intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
+
+ if (intel_dp->pps.pps_pipe != INVALID_PIPE)
+ return intel_dp->pps.pps_pipe;
+
+ pipe = vlv_find_free_pps(dev_priv);
+
+ /*
+ * Didn't find one. This should not happen since there
+ * are two power sequencers and up to two eDP ports.
+ */
+ if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
+ pipe = PIPE_A;
+
+ vlv_steal_power_sequencer(dev_priv, pipe);
+ intel_dp->pps.pps_pipe = pipe;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps.pps_pipe),
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ /* init power sequencer on this pipe and port */
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, true);
+
+ /*
+ * Even vdd force doesn't work until we've made
+ * the power sequencer lock in on the port.
+ */
+ vlv_power_sequencer_kick(intel_dp);
+
+ return intel_dp->pps.pps_pipe;
+}
+
+static int
+bxt_power_sequencer_idx(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int backlight_controller = dev_priv->vbt.backlight.controller;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* We should never land here with regular DP ports */
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+
+ if (!intel_dp->pps.pps_reset)
+ return backlight_controller;
+
+ intel_dp->pps.pps_reset = false;
+
+ /*
+ * Only the HW needs to be reprogrammed; the SW state is fixed and
+ * has been set up during connector init.
+ */
+ pps_init_registers(intel_dp, false);
+
+ return backlight_controller;
+}
+
+typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
+}
+
+static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+}
+
+static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ return true;
+}
+
+static enum pipe
+vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
+ enum port port,
+ vlv_pipe_check pipe_check)
+{
+ enum pipe pipe;
+
+ for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
+ u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
+ PANEL_PORT_SELECT_MASK;
+
+ if (port_sel != PANEL_PORT_SELECT_VLV(port))
+ continue;
+
+ if (!pipe_check(dev_priv, pipe))
+ continue;
+
+ return pipe;
+ }
+
+ return INVALID_PIPE;
+}
+
+static void
+vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum port port = dig_port->base.port;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* try to find a pipe with this port selected */
+ /* first pick one where the panel is on */
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ vlv_pipe_has_pp_on);
+ /* didn't find one? pick one where vdd is on */
+ if (intel_dp->pps.pps_pipe == INVALID_PIPE)
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ vlv_pipe_has_vdd_on);
+ /* didn't find one? pick one with just the correct port */
+ if (intel_dp->pps.pps_pipe == INVALID_PIPE)
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ vlv_pipe_any);
+
+ /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
+ if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "no initial power sequencer for [ENCODER:%d:%s]\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ return;
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name,
+ pipe_name(intel_dp->pps.pps_pipe));
+}
+
+void intel_pps_reset_all(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ !(IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_GEN9_LP(dev_priv))))
+ return;
+
+ /*
+ * We can't grab pps_mutex here due to deadlock with power_domain
+ * mutex when power_domain functions are called while holding pps_mutex.
+ * That also means that in order to use pps_pipe the code needs to
+ * hold both a power domain reference and pps_mutex, and the power domain
+ * reference get/put must be done while _not_ holding pps_mutex.
+ * pps_{lock,unlock}() do these steps in the correct order, so they
+ * should always be used.
+ */
+
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps.active_pipe != INVALID_PIPE);
+
+ if (encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ if (IS_GEN9_LP(dev_priv))
+ intel_dp->pps.pps_reset = true;
+ else
+ intel_dp->pps.pps_pipe = INVALID_PIPE;
+ }
+}
+
+struct pps_registers {
+ i915_reg_t pp_ctrl;
+ i915_reg_t pp_stat;
+ i915_reg_t pp_on;
+ i915_reg_t pp_off;
+ i915_reg_t pp_div;
+};
+
+static void intel_pps_get_registers(struct intel_dp *intel_dp,
+ struct pps_registers *regs)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int pps_idx = 0;
+
+ memset(regs, 0, sizeof(*regs));
+
+ if (IS_GEN9_LP(dev_priv))
+ pps_idx = bxt_power_sequencer_idx(intel_dp);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pps_idx = vlv_power_sequencer_pipe(intel_dp);
+
+ regs->pp_ctrl = PP_CONTROL(pps_idx);
+ regs->pp_stat = PP_STATUS(pps_idx);
+ regs->pp_on = PP_ON_DELAYS(pps_idx);
+ regs->pp_off = PP_OFF_DELAYS(pps_idx);
+
+ /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
+ if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ regs->pp_div = INVALID_MMIO_REG;
+ else
+ regs->pp_div = PP_DIVISOR(pps_idx);
+}
+
+static i915_reg_t
+_pp_ctrl_reg(struct intel_dp *intel_dp)
+{
+ struct pps_registers regs;
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ return regs.pp_ctrl;
+}
+
+static i915_reg_t
+_pp_stat_reg(struct intel_dp *intel_dp)
+{
+ struct pps_registers regs;
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ return regs.pp_stat;
+}
+
+static bool edp_have_panel_power(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_dp->pps.pps_pipe == INVALID_PIPE)
+ return false;
+
+ return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
+}
+
+static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_dp->pps.pps_pipe == INVALID_PIPE)
+ return false;
+
+ return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
+}
+
+void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
+ drm_WARN(&dev_priv->drm, 1,
+ "eDP powered off while attempting aux channel communication.\n");
+ drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
+ intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
+ intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
+ }
+}
+
+#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
+#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
+
+#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+static void intel_pps_verify_state(struct intel_dp *intel_dp);
+
+static void wait_panel_status(struct intel_dp *intel_dp,
+ u32 mask,
+ u32 value)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ i915_reg_t pp_stat_reg, pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ intel_pps_verify_state(intel_dp);
+
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "mask %08x value %08x status %08x control %08x\n",
+ mask, value,
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+
+ if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
+ mask, value, 5000))
+ drm_err(&dev_priv->drm,
+ "Panel status timeout: status %08x control %08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+
+ drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
+}
+
+static void wait_panel_on(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
+ wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void wait_panel_off(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
+ wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ ktime_t panel_power_on_time;
+ s64 panel_power_off_duration;
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
+
+ /* take the difference of the current time and the panel power off time,
+ * and then make the panel wait for t11_t12 if needed. */
+ panel_power_on_time = ktime_get_boottime();
+ panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
+
+ /* When we disable the VDD override bit last we have to do the manual
+ * wait. */
+ if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
+ wait_remaining_ms_from_jiffies(jiffies,
+ intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
+
+ wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
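+
+/*
+ * For example, with an assumed panel_power_cycle_delay of 600 ms and a
+ * panel that was powered off 250 ms earlier, the code above sleeps for
+ * the remaining 350 ms before polling the sequencer for the off-idle
+ * state.
+ */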
+
+void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ wait_panel_power_cycle(intel_dp);
+}
+
+static void wait_backlight_on(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
+ intel_dp->pps.backlight_on_delay);
+}
+
+static void edp_wait_backlight_off(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
+ intel_dp->pps.backlight_off_delay);
+}
+
+/* Read the current pp_control value, unlocking the register if it
+ * is locked
+ */
+
+static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 control;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
+ if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
+ (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
+ control &= ~PANEL_UNLOCK_MASK;
+ control |= PANEL_UNLOCK_REGS;
+ }
+ return control;
+}
+
+/*
+ * Must be paired with intel_pps_vdd_off_unlocked().
+ * Must hold pps_mutex around the whole on/off sequence.
+ * Can be nested with intel_pps_vdd_{on,off}() calls.
+ */
+bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 pp;
+ i915_reg_t pp_stat_reg, pp_ctrl_reg;
+ bool need_to_disable = !intel_dp->pps.want_panel_vdd;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return false;
+
+ cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
+ intel_dp->pps.want_panel_vdd = true;
+
+ if (edp_have_panel_vdd(intel_dp))
+ return need_to_disable;
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
+ intel_aux_power_domain(dig_port));
+
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ if (!edp_have_panel_power(intel_dp))
+ wait_panel_power_cycle(intel_dp);
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp |= EDP_FORCE_VDD;
+
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+ /*
+ * If the panel wasn't on, delay before accessing aux channel
+ */
+ if (!edp_have_panel_power(intel_dp)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] panel power wasn't enabled\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ msleep(intel_dp->pps.panel_power_up_delay);
+ }
+
+ return need_to_disable;
+}
+
+/*
+ * Must be paired with intel_pps_off().
+ * Nested calls to these functions are not allowed since
+ * we drop the lock. Caller must use some higher level
+ * locking to prevent nested calls from other threads.
+ */
+void intel_pps_vdd_on(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+ bool vdd;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ vdd = false;
+ with_intel_pps_lock(intel_dp, wakeref)
+ vdd = intel_pps_vdd_on_unlocked(intel_dp);
+ I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
+}
+
+static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port =
+ dp_to_dig_port(intel_dp);
+ u32 pp;
+ i915_reg_t pp_stat_reg, pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
+
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp &= ~EDP_FORCE_VDD;
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+ /* Make sure sequencer is idle before allowing subsequent activity */
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+
+ if ((pp & PANEL_POWER_ON) == 0)
+ intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(dig_port),
+ fetch_and_zero(&intel_dp->pps.vdd_wakeref));
+}
+
+void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
+ /*
+ * vdd might still be enabled due to the delayed vdd off.
+ * Make sure vdd is actually turned off here.
+ */
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+}
+
+static void edp_panel_vdd_work(struct work_struct *__work)
+{
+ struct intel_pps *pps = container_of(to_delayed_work(__work),
+ struct intel_pps, panel_vdd_work);
+ struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
+ intel_wakeref_t wakeref;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ if (!intel_dp->pps.want_panel_vdd)
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+ }
+}
+
+static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
+{
+ unsigned long delay;
+
+ /*
+ * Queue the timer to fire a long time from now (relative to the power
+ * down delay) to keep the panel power up across a sequence of
+ * operations.
+ */
+ delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
+ schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
+}
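+
+/*
+ * With an assumed panel_power_cycle_delay of 600 ms, for instance, the
+ * delayed work above fires roughly 3 seconds after the last VDD user
+ * dropped its reference, keeping VDD up across a sequence of operations.
+ */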
+
+/*
+ * Must be paired with intel_pps_vdd_on_unlocked().
+ * Must hold pps_mutex around the whole on/off sequence.
+ * Can be nested with intel_pps_vdd_{on,off}() calls.
+ */
+void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
+
+ intel_dp->pps.want_panel_vdd = false;
+
+ if (sync)
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+ else
+ edp_panel_vdd_schedule_off(intel_dp);
+}
+
+void intel_pps_on_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 pp;
+ i915_reg_t pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
+
+ if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
+ "[ENCODER:%d:%s] panel power already on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name))
+ return;
+
+ wait_panel_power_cycle(intel_dp);
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
+ if (IS_GEN(dev_priv, 5)) {
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+
+ pp |= PANEL_POWER_ON;
+ if (!IS_GEN(dev_priv, 5))
+ pp |= PANEL_POWER_RESET;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+ wait_panel_on(intel_dp);
+ intel_dp->pps.last_power_on = jiffies;
+
+ if (IS_GEN(dev_priv, 5)) {
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+}
+
+void intel_pps_on(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_on_unlocked(intel_dp);
+}
+
+void intel_pps_off_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 pp;
+ i915_reg_t pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
+
+ drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
+ "Need [ENCODER:%d:%s] VDD to turn off panel\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
+
+ pp = ilk_get_pp_control(intel_dp);
+ /* We need to switch off panel power _and_ force vdd, as otherwise some
+ * panels get very unhappy and cease to work. */
+ pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
+ EDP_BLC_ENABLE);
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ intel_dp->pps.want_panel_vdd = false;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+ wait_panel_off(intel_dp);
+ intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+
+ /* We got a reference when we enabled the VDD. */
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(dig_port),
+ fetch_and_zero(&intel_dp->pps.vdd_wakeref));
+}
+
+void intel_pps_off(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_off_unlocked(intel_dp);
+}
+
+/* Enable backlight in the panel power control. */
+void intel_pps_backlight_on(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ /*
+ * If we enable the backlight right away following a panel power
+ * on, we may see slight flicker as the panel syncs with the eDP
+ * link. So delay a bit to make sure the image is solid before
+ * allowing it to appear.
+ */
+ wait_backlight_on(intel_dp);
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ u32 pp;
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp |= EDP_BLC_ENABLE;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+}
+
+/* Disable backlight in the panel power control. */
+void intel_pps_backlight_off(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ u32 pp;
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp &= ~EDP_BLC_ENABLE;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+
+ intel_dp->pps.last_backlight_off = jiffies;
+ edp_wait_backlight_off(intel_dp);
+}
+
+/*
+ * Hook for controlling the panel power control backlight through the bl_power
+ * sysfs attribute. Take care to handle multiple calls.
+ */
+void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ intel_wakeref_t wakeref;
+ bool is_enabled;
+
+ is_enabled = false;
+ with_intel_pps_lock(intel_dp, wakeref)
+ is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
+ if (is_enabled == enable)
+ return;
+
+ drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
+ enable ? "enable" : "disable");
+
+ if (enable)
+ intel_pps_backlight_on(intel_dp);
+ else
+ intel_pps_backlight_off(intel_dp);
+}
+
+static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum pipe pipe = intel_dp->pps.pps_pipe;
+ i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+
+ /*
+ * VLV seems to get confused when multiple power sequencers
+ * have the same port selected (even if only one has power/vdd
+ * enabled). The failure manifests as vlv_wait_port_ready() failing.
+ * CHV, on the other hand, doesn't seem to mind having the same port
+ * selected in multiple power sequencers, but let's always clear the
+ * port select when logically disconnecting a power sequencer
+ * from a port.
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ intel_de_write(dev_priv, pp_on_reg, 0);
+ intel_de_posting_read(dev_priv, pp_on_reg);
+
+ intel_dp->pps.pps_pipe = INVALID_PIPE;
+}
+
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct intel_encoder *encoder;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
+ "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
+
+ if (intel_dp->pps.pps_pipe != pipe)
+ continue;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
+
+ /* make sure vdd is off before we steal it */
+ vlv_detach_power_sequencer(intel_dp);
+ }
+}
+
+void vlv_pps_init(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+
+ if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
+ intel_dp->pps.pps_pipe != crtc->pipe) {
+ /*
+ * If another power sequencer was being used on this
+ * port previously make sure to turn off vdd there while
+ * we still have control of it.
+ */
+ vlv_detach_power_sequencer(intel_dp);
+ }
+
+ /*
+ * We may be stealing the power
+ * sequencer from another port.
+ */
+ vlv_steal_power_sequencer(dev_priv, crtc->pipe);
+
+ intel_dp->pps.active_pipe = crtc->pipe;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ /* now it's all ours */
+ intel_dp->pps.pps_pipe = crtc->pipe;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
+ encoder->base.name);
+
+ /* init power sequencer on this pipe and port */
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, true);
+}
+
+static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+
+ /*
+ * The VDD bit needs a power domain reference, so if the bit is
+ * already enabled when we boot or resume, grab this reference and
+ * schedule a vdd off, so we don't hold on to the reference
+ * indefinitely.
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD left on by BIOS, adjusting state tracking\n");
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
+ intel_aux_power_domain(dig_port));
+
+ edp_panel_vdd_schedule_off(intel_dp);
+}
+
+bool intel_pps_have_power(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+ bool have_power = false;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ have_power = edp_have_panel_power(intel_dp) &&
+ edp_have_panel_vdd(intel_dp);
+ }
+
+ return have_power;
+}
+
+static void pps_init_timestamps(struct intel_dp *intel_dp)
+{
+ intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+ intel_dp->pps.last_power_on = jiffies;
+ intel_dp->pps.last_backlight_off = jiffies;
+}
+
+static void
+intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 pp_on, pp_off, pp_ctl;
+ struct pps_registers regs;
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ pp_ctl = ilk_get_pp_control(intel_dp);
+
+ /* Ensure PPS is unlocked */
+ if (!HAS_DDI(dev_priv))
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
+
+ pp_on = intel_de_read(dev_priv, regs.pp_on);
+ pp_off = intel_de_read(dev_priv, regs.pp_off);
+
+ /* Pull timing values out of registers */
+ seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
+ seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
+ seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
+ seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
+
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ u32 pp_div;
+
+ pp_div = intel_de_read(dev_priv, regs.pp_div);
+
+ seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
+ } else {
+ seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
+ }
+}
+
+static void
+intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
+{
+ DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ state_name,
+ seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
+}
+
+static void
+intel_pps_verify_state(struct intel_dp *intel_dp)
+{
+ struct edp_power_seq hw;
+ struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
+
+ intel_pps_readout_hw_state(intel_dp, &hw);
+
+ if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
+ hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
+ DRM_ERROR("PPS state mismatch\n");
+ intel_pps_dump_state("sw", sw);
+ intel_pps_dump_state("hw", &hw);
+ }
+}
+
+static void pps_init_delays(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct edp_power_seq cur, vbt, spec,
+ *final = &intel_dp->pps.pps_delays;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* already initialized? */
+ if (final->t11_t12 != 0)
+ return;
+
+ intel_pps_readout_hw_state(intel_dp, &cur);
+
+ intel_pps_dump_state("cur", &cur);
+
+ vbt = dev_priv->vbt.edp.pps;
+ /* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
+ * of 500ms appears to be too short. Occasionally the panel
+ * just fails to power back on. Increasing the delay to 800ms
+ * seems sufficient to avoid this problem.
+ */
+ if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
+ vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
+ drm_dbg_kms(&dev_priv->drm,
+ "Increasing T12 panel delay as per the quirk to %d\n",
+ vbt.t11_t12);
+ }
+ /* T11_T12 delay is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it by 1000 to make it in units of 100usec,
+ * too. */
+ vbt.t11_t12 += 100 * 10;
+
+ /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+ * our hw here, which are all in 100usec. */
+ spec.t1_t3 = 210 * 10;
+ spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+ spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+ spec.t10 = 500 * 10;
+ /* This one is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it by 1000 to make it in units of 100usec,
+ * too. */
+ spec.t11_t12 = (510 + 100) * 10;
+
+ intel_pps_dump_state("vbt", &vbt);
+
+ /* Use the max of the register settings and vbt. If both are
+ * unset, fall back to the spec limits. */
+#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
+ spec.field : \
+ max(cur.field, vbt.field))
+ assign_final(t1_t3);
+ assign_final(t8);
+ assign_final(t9);
+ assign_final(t10);
+ assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
+ intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->pps.backlight_on_delay = get_delay(t8);
+ intel_dp->pps.backlight_off_delay = get_delay(t9);
+ intel_dp->pps.panel_power_down_delay = get_delay(t10);
+ intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->pps.panel_power_up_delay,
+ intel_dp->pps.panel_power_down_delay,
+ intel_dp->pps.panel_power_cycle_delay);
+
+ drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
+ intel_dp->pps.backlight_on_delay,
+ intel_dp->pps.backlight_off_delay);
+
+ /*
+ * We override the HW backlight delays to 1 because we do manual waits
+ * on them. For T8, even BSpec recommends doing it. For T9, if we
+ * don't do this, we'll end up waiting for the backlight off delay
+ * twice: once when we do the manual sleep, and once when we disable
+ * the panel and wait for the PP_STATUS bit to become zero.
+ */
+ final->t8 = 1;
+ final->t9 = 1;
+
+ /*
+ * HW has only a 100msec granularity for t11_t12 so round it up
+ * accordingly.
+ */
+ final->t11_t12 = roundup(final->t11_t12, 100 * 10);
+}
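+
+/*
+ * The delays above are kept in the hardware's 100 us units. For example,
+ * an assumed T1+T3 of 2100 yields panel_power_up_delay =
+ * DIV_ROUND_UP(2100, 10) = 210 ms, and an assumed VBT T11_T12 of 5000
+ * (500 ms) first gains the 100 ms zero-base correction (6000) and, if it
+ * wins the max() above, ends up as a 600 ms panel_power_cycle_delay.
+ */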
+
+static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 pp_on, pp_off, port_sel = 0;
+ int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
+ struct pps_registers regs;
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
+ const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ /*
+ * On some VLV machines the BIOS can leave the VDD
+ * enabled even on power sequencers which aren't
+ * hooked up to any port. This would mess up the
+ * power domain tracking the first time we pick
+ * one of these power sequencers for use since
+ * intel_pps_vdd_on_unlocked() would notice that the VDD was
+ * already on and therefore wouldn't grab the power
+ * domain reference. Disable VDD first to avoid this.
+ * This also avoids spuriously turning the VDD on as
+ * soon as the new power sequencer gets initialized.
+ */
+ if (force_disable_vdd) {
+ u32 pp = ilk_get_pp_control(intel_dp);
+
+ drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
+ "Panel power already on\n");
+
+ if (pp & EDP_FORCE_VDD)
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD already on, disabling first\n");
+
+ pp &= ~EDP_FORCE_VDD;
+
+ intel_de_write(dev_priv, regs.pp_ctrl, pp);
+ }
+
+ pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
+ pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
+
+ /* Haswell doesn't have any port selection bits for the panel
+ * power sequencer any more. */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ port_sel = PANEL_PORT_SELECT_VLV(port);
+ } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ switch (port) {
+ case PORT_A:
+ port_sel = PANEL_PORT_SELECT_DPA;
+ break;
+ case PORT_C:
+ port_sel = PANEL_PORT_SELECT_DPC;
+ break;
+ case PORT_D:
+ port_sel = PANEL_PORT_SELECT_DPD;
+ break;
+ default:
+ MISSING_CASE(port);
+ break;
+ }
+ }
+
+ pp_on |= port_sel;
+
+ intel_de_write(dev_priv, regs.pp_on, pp_on);
+ intel_de_write(dev_priv, regs.pp_off, pp_off);
+
+ /*
+ * Compute the divisor for the pp clock, simply matching the Bspec formula.
+ */
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ intel_de_write(dev_priv, regs.pp_div,
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+ } else {
+ u32 pp_ctl;
+
+ pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
+ pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
+ pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+ intel_de_read(dev_priv, regs.pp_on),
+ intel_de_read(dev_priv, regs.pp_off),
+ i915_mmio_reg_valid(regs.pp_div) ?
+ intel_de_read(dev_priv, regs.pp_div) :
+ (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
+}
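+
+/*
+ * For example, with an assumed 24 MHz raw clock the reference divider
+ * written above is (100 * 24) / 2 - 1 = 1199, and a 600 ms power cycle
+ * delay (6000 in 100 us units) is programmed as DIV_ROUND_UP(6000, 1000)
+ * = 6, i.e. the power cycle field counts in 100 ms steps.
+ */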
+
+void intel_pps_encoder_reset(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ /*
+ * Reinit the power sequencer also on the resume path, in case
+ * BIOS did something nasty with it.
+ */
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ vlv_initial_power_sequencer_setup(intel_dp);
+
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, false);
+
+ intel_pps_vdd_sanitize(intel_dp);
+ }
+}
+
+void intel_pps_init(struct intel_dp *intel_dp)
+{
+ INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
+
+ pps_init_timestamps(intel_dp);
+
+ intel_pps_encoder_reset(intel_dp);
+}
+
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
+{
+ int pps_num;
+ int pps_idx;
+
+ if (HAS_DDI(dev_priv))
+ return;
+ /*
+ * This w/a is needed at least on CPT/PPT, but to be sure apply it
+ * everywhere the registers can be write protected.
+ */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pps_num = 2;
+ else
+ pps_num = 1;
+
+ for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
+ u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
+
+ val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
+ intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
+ }
+}
+
+void intel_pps_setup(struct drm_i915_private *i915)
+{
+ if (HAS_PCH_SPLIT(i915) || IS_GEN9_LP(i915))
+ i915->pps_mmio_base = PCH_PPS_BASE;
+ else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ i915->pps_mmio_base = VLV_PPS_BASE;
+ else
+ i915->pps_mmio_base = PPS_BASE;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
new file mode 100644
index 000000000000..fbbcca782e7b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_PPS_H__
+#define __INTEL_PPS_H__
+
+#include <linux/types.h>
+
+#include "intel_wakeref.h"
+
+struct drm_i915_private;
+struct intel_connector;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+
+intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp);
+intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref);
+
+#define with_intel_pps_lock(dp, wf) \
+ for ((wf) = intel_pps_lock(dp); (wf); (wf) = intel_pps_unlock((dp), (wf)))
+
+void intel_pps_backlight_on(struct intel_dp *intel_dp);
+void intel_pps_backlight_off(struct intel_dp *intel_dp);
+void intel_pps_backlight_power(struct intel_connector *connector, bool enable);
+
+bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp);
+void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync);
+void intel_pps_on_unlocked(struct intel_dp *intel_dp);
+void intel_pps_off_unlocked(struct intel_dp *intel_dp);
+void intel_pps_check_power_unlocked(struct intel_dp *intel_dp);
+
+void intel_pps_vdd_on(struct intel_dp *intel_dp);
+void intel_pps_on(struct intel_dp *intel_dp);
+void intel_pps_off(struct intel_dp *intel_dp);
+void intel_pps_vdd_off_sync(struct intel_dp *intel_dp);
+bool intel_pps_have_power(struct intel_dp *intel_dp);
+void intel_pps_wait_power_cycle(struct intel_dp *intel_dp);
+
+void intel_pps_init(struct intel_dp *intel_dp);
+void intel_pps_encoder_reset(struct intel_dp *intel_dp);
+void intel_pps_reset_all(struct drm_i915_private *i915);
+
+void vlv_pps_init(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+
+void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
+void intel_pps_setup(struct drm_i915_private *i915);
+
+#endif /* __INTEL_PPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index b3631b722de3..850cb7f5b332 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -28,9 +28,10 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_display_types.h"
+#include "intel_dp_aux.h"
+#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_sprite.h"
-#include "intel_hdmi.h"
/**
* DOC: Panel Self Refresh (PSR/SRD)
@@ -305,7 +306,7 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
- if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) {
+ if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
drm_dbg_kms(&dev_priv->drm,
"PSR support not currently available for this panel\n");
return;
@@ -811,6 +812,13 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
&crtc_state->hw.adjusted_mode;
int psr_setup_time;
+ /*
+ * Current PSR panels don't work reliably with VRR enabled,
+ * so if VRR is enabled, do not enable PSR.
+ */
+ if (crtc_state->vrr.enable)
+ return;
+
if (!CAN_PSR(dev_priv))
return;
@@ -1185,7 +1193,9 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
- u32 val;
+ const struct drm_rect *clip;
+ u32 val, offset;
+ int ret, x, y;
if (!crtc_state->enable_psr2_sel_fetch)
return;
@@ -1196,16 +1206,25 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
if (!val || plane->id == PLANE_CURSOR)
return;
- val = plane_state->uapi.dst.y1 << 16 | plane_state->uapi.dst.x1;
+ clip = &plane_state->psr2_sel_fetch_area;
+
+ val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
+ val |= plane_state->uapi.dst.x1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
- val = plane_state->color_plane[color_plane].y << 16;
- val |= plane_state->color_plane[color_plane].x;
+ /* TODO: consider auxiliary surfaces */
+ x = plane_state->uapi.src.x1 >> 16;
+ y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
+ ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
+ if (ret)
+ drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
+ ret);
+ val = y << 16 | x;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
val);
/* Sizes are 0 based */
- val = ((drm_rect_height(&plane_state->uapi.src) >> 16) - 1) << 16;
+ val = (drm_rect_height(clip) - 1) << 16;
val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
}
@@ -1237,9 +1256,11 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
if (clip->y1 == -1)
goto exit;
+ drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
+
val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
- val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(DIV_ROUND_UP(clip->y2, 4) + 1);
+ val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
exit:
crtc_state->psr2_man_track_ctl = val;
}
@@ -1264,8 +1285,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
struct intel_plane_state *new_plane_state, *old_plane_state;
- struct drm_rect pipe_clip = { .y1 = -1 };
struct intel_plane *plane;
bool full_update = false;
int i, ret;
@@ -1277,13 +1298,25 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (ret)
return ret;
+ /*
+ * Calculate the minimal selective fetch area of each plane and
+ * calculate the pipe damaged area.
+ * In the next loop the plane selective fetch area will actually be set
+ * using the whole pipe damaged area.
+ */
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
- struct drm_rect temp;
+ struct drm_rect src, damaged_area = { .y1 = -1 };
+ struct drm_mode_rect *damaged_clips;
+ u32 num_clips, j;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
continue;
+ if (!new_plane_state->uapi.visible &&
+ !old_plane_state->uapi.visible)
+ continue;
+
/*
* TODO: Not clear how to handle planes with negative position,
* also planes are not updated if they have a negative X
@@ -1295,18 +1328,94 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
break;
}
- if (!new_plane_state->uapi.visible)
- continue;
+ num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);
/*
- * For now doing a selective fetch in the whole plane area,
- * optimizations will come in the future.
+ * If the visibility or the plane position changed, mark the
+ * whole plane area as damaged, as it needs a complete redraw
+ * in the old and new positions.
*/
- temp.y1 = new_plane_state->uapi.dst.y1;
- temp.y2 = new_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &temp);
+ if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
+ !drm_rect_equals(&new_plane_state->uapi.dst,
+ &old_plane_state->uapi.dst)) {
+ if (old_plane_state->uapi.visible) {
+ damaged_area.y1 = old_plane_state->uapi.dst.y1;
+ damaged_area.y2 = old_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area);
+ }
+
+ if (new_plane_state->uapi.visible) {
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area);
+ }
+ continue;
+ } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
+ (!num_clips &&
+ new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
+ /*
+ * If the plane doesn't have damaged areas but the
+ * framebuffer changed or alpha changed, mark the whole
+ * plane area as damaged.
+ */
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area);
+ continue;
+ }
+
+ drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
+ damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);
+
+ for (j = 0; j < num_clips; j++) {
+ struct drm_rect clip;
+
+ clip.x1 = damaged_clips[j].x1;
+ clip.y1 = damaged_clips[j].y1;
+ clip.x2 = damaged_clips[j].x2;
+ clip.y2 = damaged_clips[j].y2;
+ if (drm_rect_intersect(&clip, &src))
+ clip_area_update(&damaged_area, &clip);
+ }
+
+ if (damaged_area.y1 == -1)
+ continue;
+
+ damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
+ damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
+ clip_area_update(&pipe_clip, &damaged_area);
+ }
+
+ if (full_update)
+ goto skip_sel_fetch_set_loop;
+
+ /* It must be aligned to 4 lines */
+ pipe_clip.y1 -= pipe_clip.y1 % 4;
+ if (pipe_clip.y2 % 4)
+ pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4;
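+ /*
+ * E.g. an assumed damaged area of y1 = 13, y2 = 50 is widened to
+ * y1 = 12, y2 = 52 so the selective update covers whole 4-line blocks.
+ */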
+
+ /*
+ * Now that we have the pipe damaged area, check if it intersects
+ * with each plane; if it does, set the plane selective fetch area.
+ */
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i) {
+ struct drm_rect *sel_fetch_area, inter;
+
+ if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
+ !new_plane_state->uapi.visible)
+ continue;
+
+ inter = pipe_clip;
+ if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
+ continue;
+
+ sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
+ sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
+ sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
}
+skip_sel_fetch_set_loop:
psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 019a2d6d807a..993543334a1e 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -49,6 +49,8 @@
#include "intel_psr.h"
#include "intel_dsi.h"
#include "intel_sprite.h"
+#include "i9xx_plane.h"
+#include "intel_vrr.h"
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
@@ -61,13 +63,15 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
1000 * adjusted_mode->crtc_htotal);
}
-/* FIXME: We should instead only take spinlocks once for the entire update
- * instead of once per mmio. */
-#if IS_ENABLED(CONFIG_PROVE_LOCKING)
-#define VBLANK_EVASION_TIME_US 250
-#else
-#define VBLANK_EVASION_TIME_US 100
-#endif
+static int intel_mode_vblank_start(const struct drm_display_mode *mode)
+{
+ int vblank_start = mode->crtc_vblank_start;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vblank_start = DIV_ROUND_UP(vblank_start, 2);
+
+ return vblank_start;
+}
/**
* intel_pipe_update_start() - start update of a set of display registers
@@ -97,9 +101,10 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
if (new_crtc_state->uapi.async_flip)
return;
- vblank_start = adjusted_mode->crtc_vblank_start;
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
- vblank_start = DIV_ROUND_UP(vblank_start, 2);
+ if (new_crtc_state->vrr.enable)
+ vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
+ else
+ vblank_start = intel_mode_vblank_start(adjusted_mode);
/* FIXME needs to be calibrated sensibly */
min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
@@ -187,6 +192,36 @@ irq_disable:
local_irq_disable();
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
+{
+ u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
+ unsigned int h;
+
+ h = ilog2(delta >> 9);
+ if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
+ h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
+ crtc->debug.vbl.times[h]++;
+
+ crtc->debug.vbl.sum += delta;
+ if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
+ crtc->debug.vbl.min = delta;
+ if (delta > crtc->debug.vbl.max)
+ crtc->debug.vbl.max = delta;
+
+ if (delta > 1000 * VBLANK_EVASION_TIME_US) {
+ drm_dbg_kms(crtc->base.dev,
+ "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
+ pipe_name(crtc->pipe),
+ div_u64(delta, 1000),
+ VBLANK_EVASION_TIME_US);
+ crtc->debug.vbl.over++;
+ }
+}
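+
+/*
+ * For instance, an assumed 20 us update (20000 ns) lands in histogram
+ * bucket ilog2(20000 >> 9) = 5, i.e. the buckets cover roughly
+ * power-of-two ranges starting at about 0.5 us.
+ */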
+#else
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
+#endif
+
/**
* intel_pipe_update_end() - end update of a set of display registers
* @new_crtc_state: the new crtc state
@@ -235,6 +270,9 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
local_irq_enable();
+ /* Send VRR Push to terminate Vblank */
+ intel_vrr_send_push(new_crtc_state);
+
if (intel_vgpu_active(dev_priv))
return;
@@ -249,15 +287,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
crtc->debug.min_vbl, crtc->debug.max_vbl,
crtc->debug.scanline_start, scanline_end);
}
-#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
- else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
- VBLANK_EVASION_TIME_US)
- drm_warn(&dev_priv->drm,
- "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
- pipe_name(pipe),
- ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
- VBLANK_EVASION_TIME_US);
-#endif
+
+ dbg_vblank_evade(crtc, end_vbl_time);
}
int intel_plane_check_stride(const struct intel_plane_state *plane_state)
@@ -618,13 +649,19 @@ skl_program_scaler(struct intel_plane *plane,
/* Preoffset values for YUV to RGB Conversion */
#define PREOFF_YUV_TO_RGB_HI 0x1800
-#define PREOFF_YUV_TO_RGB_ME 0x1F00
+#define PREOFF_YUV_TO_RGB_ME 0x0000
#define PREOFF_YUV_TO_RGB_LO 0x1800
#define ROFF(x) (((x) & 0xffff) << 16)
#define GOFF(x) (((x) & 0xffff) << 0)
#define BOFF(x) (((x) & 0xffff) << 16)
+/*
+ * Programs the input color space conversion stage for ICL HDR planes.
+ * Note that it is assumed that this stage always happens after YUV
+ * range correction. Thus, the input to this stage is assumed to be
+ * in full-range YCbCr.
+ */
static void
icl_program_input_csc(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -672,52 +709,7 @@ icl_program_input_csc(struct intel_plane *plane,
0x0, 0x7800, 0x7F10,
},
};
-
- /* Matrix for Limited Range to Full Range Conversion */
- static const u16 input_csc_matrix_lr[][9] = {
- /*
- * BT.601 Limted range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164384, 0.000, 1.596027,
- * 1.164384, -0.39175, -0.812813,
- * 1.164384, 2.017232, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT601] = {
- 0x7CC8, 0x7950, 0x0,
- 0x8D00, 0x7950, 0x9C88,
- 0x0, 0x7950, 0x6810,
- },
- /*
- * BT.709 Limited range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164384, 0.000, 1.792741,
- * 1.164384, -0.213249, -0.532909,
- * 1.164384, 2.112402, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT709] = {
- 0x7E58, 0x7950, 0x0,
- 0x8888, 0x7950, 0xADA8,
- 0x0, 0x7950, 0x6870,
- },
- /*
- * BT.2020 Limited range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164, 0.000, 1.678,
- * 1.164, -0.1873, -0.6504,
- * 1.164, 2.1417, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT2020] = {
- 0x7D70, 0x7950, 0x0,
- 0x8A68, 0x7950, 0xAC00,
- 0x0, 0x7950, 0x6890,
- },
- };
- const u16 *csc;
-
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- csc = input_csc_matrix[plane_state->hw.color_encoding];
- else
- csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
+ const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
ROFF(csc[0]) | GOFF(csc[1]));
@@ -734,14 +726,8 @@ icl_program_input_csc(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
PREOFF_YUV_TO_RGB_HI);
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- 0);
- else
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- PREOFF_YUV_TO_RGB_ME);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
PREOFF_YUV_TO_RGB_LO);
intel_de_write_fw(dev_priv,
@@ -755,7 +741,8 @@ icl_program_input_csc(struct intel_plane *plane,
static void
skl_plane_async_flip(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
unsigned long irqflags;
@@ -766,6 +753,9 @@ skl_plane_async_flip(struct intel_plane *plane,
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
+ if (async_flip)
+ plane_ctl |= PLANE_CTL_ASYNC_FLIP;
+
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
@@ -851,6 +841,10 @@ skl_program_plane(struct intel_plane *plane,
if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
icl_program_input_csc(plane, crtc_state, plane_state);
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+ intel_uncore_write64_fw(&dev_priv->uncore,
+ PLANE_CC_VAL(pipe, plane_id), plane_state->ccval);
+
skl_write_plane_wm(plane, crtc_state);
intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
@@ -942,6 +936,28 @@ skl_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static void
+skl_plane_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+skl_plane_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
static void i9xx_plane_linear_gamma(u16 gamma[8])
{
/* The points are not evenly spaced. */
@@ -1835,7 +1851,26 @@ g4x_sprite_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation)
{
- return 16384;
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return min(4096 * cpp, 16 * 1024);
+ else
+ return 16 * 1024;
+}
+
+static unsigned int
+hsw_sprite_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
+ return min(8192 * cpp, 16 * 1024);
}
static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
@@ -2350,7 +2385,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) {
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) {
drm_dbg_kms(&dev_priv->drm,
"Y/Yf tiling not supported in IF-ID mode\n");
return -EINVAL;
@@ -2840,6 +2876,7 @@ static const u64 skl_plane_format_modifiers_ccs[] = {
static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
@@ -2848,6 +2885,7 @@ static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
@@ -3038,6 +3076,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
break;
default:
return false;
@@ -3274,7 +3313,13 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
plane->min_cdclk = skl_plane_min_cdclk;
- plane->async_flip = skl_plane_async_flip;
+
+ if (plane_id == PLANE_PRIMARY) {
+ plane->need_async_flip_disable_wa = IS_GEN_RANGE(dev_priv, 9, 10);
+ plane->async_flip = skl_plane_async_flip;
+ plane->enable_flip_done = skl_plane_enable_flip_done;
+ plane->disable_flip_done = skl_plane_disable_flip_done;
+ }
if (INTEL_GEN(dev_priv) >= 11)
formats = icl_get_plane_formats(dev_priv, pipe,
@@ -3382,11 +3427,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
return plane;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- plane->max_stride = i9xx_plane_max_stride;
plane->update_plane = vlv_update_plane;
plane->disable_plane = vlv_disable_plane;
plane->get_hw_state = vlv_plane_get_hw_state;
plane->check_plane = vlv_sprite_check;
+ plane->max_stride = i965_plane_max_stride;
plane->min_cdclk = vlv_plane_min_cdclk;
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
@@ -3400,16 +3445,18 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_funcs = &vlv_sprite_funcs;
} else if (INTEL_GEN(dev_priv) >= 7) {
- plane->max_stride = g4x_sprite_max_stride;
plane->update_plane = ivb_update_plane;
plane->disable_plane = ivb_disable_plane;
plane->get_hw_state = ivb_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ plane->max_stride = hsw_sprite_max_stride;
plane->min_cdclk = hsw_plane_min_cdclk;
- else
+ } else {
+ plane->max_stride = g4x_sprite_max_stride;
plane->min_cdclk = ivb_sprite_min_cdclk;
+ }
formats = snb_plane_formats;
num_formats = ARRAY_SIZE(snb_plane_formats);
@@ -3417,11 +3464,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_funcs = &snb_sprite_funcs;
} else {
- plane->max_stride = g4x_sprite_max_stride;
plane->update_plane = g4x_update_plane;
plane->disable_plane = g4x_disable_plane;
plane->get_hw_state = g4x_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ plane->max_stride = g4x_sprite_max_stride;
plane->min_cdclk = g4x_sprite_min_cdclk;
modifiers = i9xx_plane_format_modifiers;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index cd2104ba1ca1..76126dd8d584 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -17,6 +17,16 @@ struct drm_i915_private;
struct intel_crtc_state;
struct intel_plane_state;
+/*
+ * FIXME: We should instead only take spinlocks once for the entire update
+ * instead of once per mmio.
+ */
+#if IS_ENABLED(CONFIG_PROVE_LOCKING)
+#define VBLANK_EVASION_TIME_US 250
+#else
+#define VBLANK_EVASION_TIME_US 100
+#endif
+
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 4346bc1a747a..27dc2dad6809 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -262,7 +262,7 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
mask |= BIT(TC_PORT_LEGACY);
/* The sink can be connected only in a single mode. */
- if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
+ if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1))
tc_port_fixup_legacy_flag(dig_port, mask);
return mask;
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 49b4b5fca941..187ec573de59 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -319,6 +319,8 @@ enum vbt_gmbus_ddi {
ICL_DDC_BUS_DDI_A = 0x1,
ICL_DDC_BUS_DDI_B,
TGL_DDC_BUS_DDI_C,
+ RKL_DDC_BUS_DDI_D = 0x3,
+ RKL_DDC_BUS_DDI_E,
ICL_DDC_BUS_PORT_1 = 0x4,
ICL_DDC_BUS_PORT_2,
ICL_DDC_BUS_PORT_3,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index e2716a67b281..f58cc5700784 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -454,8 +454,6 @@ int intel_dsc_compute_params(struct intel_encoder *encoder,
else if (vdsc_cfg->bits_per_component == 12)
vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
- /* RC_MODEL_SIZE is a constant across all configurations */
- vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
/* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */
vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
@@ -741,7 +739,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
/* Populate PICTURE_PARAMETER_SET_9 registers */
pps_val = 0;
- pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
+ pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) |
DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
new file mode 100644
index 000000000000..a9c2b2fd9252
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_vrr.h"
+
+bool intel_vrr_is_capable(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp;
+ const struct drm_display_info *info = &connector->display_info;
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+ connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return false;
+
+ intel_dp = intel_attached_dp(to_intel_connector(connector));
+ /*
+ * The DP sink is capable of VRR video timings if the
+ * Ignore MSA bit is set in the DPCD.
+ * The EDID monitor range should also span more than 10 Hz for a
+ * reasonable Adaptive Sync or Variable Refresh Rate end user
+ * experience.
+ */
+ return HAS_VRR(i915) &&
+ drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd) &&
+ info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
+}
+
+void
+intel_vrr_check_modeset(struct intel_atomic_state *state)
+{
+ int i;
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (new_crtc_state->uapi.vrr_enabled !=
+ old_crtc_state->uapi.vrr_enabled)
+ new_crtc_state->uapi.mode_changed = true;
+ }
+}
+
+/*
+ * Without VRR registers get latched at:
+ * vblank_start
+ *
+ * With VRR the earliest registers can get latched is:
+ * intel_vrr_vmin_vblank_start(), which if we want to maintain
+ * the correct min vtotal is >=vblank_start+1
+ *
+ * The latest point registers can get latched is the vmax decision boundary:
+ * intel_vrr_vmax_vblank_start()
+ *
+ * Between those two points the vblank exit starts (and hence registers get
+ * latched) ASAP after a push is sent.
+ *
+ * framestart_delay is programmable 0-3.
+ */
+static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ /* The hw imposes the extra scanline before frame start */
+ return crtc_state->vrr.pipeline_full + i915->framestart_delay + 1;
+}
+
+int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
+{
+ /* Min vblank is actually determined by flipline, which is always >= vmin + 1 */
+ return crtc_state->vrr.vmin + 1 - intel_vrr_vblank_exit_length(crtc_state);
+}
+
+int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->vrr.vmax - intel_vrr_vblank_exit_length(crtc_state);
+}
+
+void
+intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ const struct drm_display_info *info = &connector->base.display_info;
+ int vmin, vmax;
+
+ if (!intel_vrr_is_capable(&connector->base))
+ return;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return;
+
+ if (!crtc_state->uapi.vrr_enabled)
+ return;
+
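+ /*
+ * Worked example (hypothetical numbers): a 148500 kHz pixel clock
+ * with htotal 2200 and an EDID monitor range of 40-120 Hz yields
+ * vmin = DIV_ROUND_UP(148500000, 2200 * 120) = 563 lines (~120 Hz)
+ * and vmax = 148500000 / (2200 * 40) = 1687 lines (~40 Hz).
+ */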
+ vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000,
+ adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq);
+ vmax = adjusted_mode->crtc_clock * 1000 /
+ (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
+
+ vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal);
+ vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+
+ if (vmin >= vmax)
+ return;
+
+ /*
+ * flipline determines the min vblank length the hardware will
+ * generate, and flipline>=vmin+1, hence we reduce vmin by one
+ * to make sure we can get the actual min vblank length.
+ */
+ crtc_state->vrr.vmin = vmin - 1;
+ crtc_state->vrr.vmax = vmax;
+ crtc_state->vrr.enable = true;
+
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin + 1;
+
+ /*
+ * FIXME: s/4/framestart_delay+1/ to get consistent
+ * earliest/latest points for register latching regardless
+ * of the framestart_delay used?
+ *
+ * FIXME: this really needs the extra scanline to provide consistent
+ * behaviour for all framestart_delay values. Otherwise with
+ * framestart_delay==3 we will end up extending the min vblank by
+ * one extra line.
+ */
+ crtc_state->vrr.pipeline_full =
+ min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1);
+
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
+
+void intel_vrr_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 trans_vrr_ctl;
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
+ VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
+ VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
+ VRR_CTL_PIPELINE_FULL_OVERRIDE;
+
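+ /*
+ * The TRANS_VRR_VMIN/VMAX and FLIPLINE registers are programmed
+ * with N-1 values; intel_vrr_get_config() adds the one back on
+ * readout.
+ */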
+ intel_de_write(dev_priv, TRANS_VRR_VMIN(cpu_transcoder), crtc_state->vrr.vmin - 1);
+ intel_de_write(dev_priv, TRANS_VRR_VMAX(cpu_transcoder), crtc_state->vrr.vmax - 1);
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), trans_vrr_ctl);
+ intel_de_write(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder), crtc_state->vrr.flipline - 1);
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN);
+}
+
+void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder),
+ TRANS_PUSH_EN | TRANS_PUSH_SEND);
+}
+
+void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+
+ if (!old_crtc_state->vrr.enable)
+ return;
+
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0);
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0);
+}
+
+void intel_vrr_get_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 trans_vrr_ctl;
+
+ trans_vrr_ctl = intel_de_read(dev_priv, TRANS_VRR_CTL(cpu_transcoder));
+ crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
+ if (!crtc_state->vrr.enable)
+ return;
+
+ if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE)
+ crtc_state->vrr.pipeline_full = REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
+ if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN)
+ crtc_state->vrr.flipline = intel_de_read(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder)) + 1;
+ crtc_state->vrr.vmax = intel_de_read(dev_priv, TRANS_VRR_VMAX(cpu_transcoder)) + 1;
+ crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1;
+
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
new file mode 100644
index 000000000000..fac01bf4ab50
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VRR_H__
+#define __INTEL_VRR_H__
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+
+bool intel_vrr_is_capable(struct drm_connector *connector);
+void intel_vrr_check_modeset(struct intel_atomic_state *state);
+void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+void intel_vrr_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_vrr_send_push(const struct intel_crtc_state *crtc_state);
+void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state);
+void intel_vrr_get_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
+int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
+int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_VRR_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index d52f9c177908..f94025ec603a 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -812,10 +812,20 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_dsi_prepare(encoder, pipe_config);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
- intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
- /* Deassert reset */
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ /*
+ * Give the panel time to power on and then deassert its reset.
+ * Depending on the VBT MIPI sequences version, the deassert-seq
+ * may already contain the necessary delay, in which case
+ * intel_dsi_msleep() will skip it. If there is no deassert-seq,
+ * an unconditional msleep is used to give the panel time to
+ * power on.
+ */
+ if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
+ intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ } else {
+ msleep(intel_dsi->panel_on_delay);
+ }
if (IS_GEMINILAKE(dev_priv)) {
glk_cold_boot = glk_dsi_enable_io(encoder);
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
new file mode 100644
index 000000000000..9e508e7d4629
--- /dev/null
+++ b/drivers/gpu/drm/i915/dma_resv_utils.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/dma-resv.h>
+
+#include "dma_resv_utils.h"
+
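+/*
+ * Opportunistically prune @resv: if the reservation can be trylocked and
+ * every fence (shared and exclusive) has already signaled, install a NULL
+ * exclusive fence so the stale fence references are dropped early.
+ */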
+void dma_resv_prune(struct dma_resv *resv)
+{
+ if (dma_resv_trylock(resv)) {
+ if (dma_resv_test_signaled_rcu(resv, true))
+ dma_resv_add_excl_fence(resv, NULL);
+ dma_resv_unlock(resv);
+ }
+}
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.h b/drivers/gpu/drm/i915/dma_resv_utils.h
new file mode 100644
index 000000000000..b9d8fb5f8367
--- /dev/null
+++ b/drivers/gpu/drm/i915/dma_resv_utils.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DMA_RESV_UTILS_H
+#define DMA_RESV_UTILS_H
+
+struct dma_resv;
+
+void dma_resv_prune(struct dma_resv *resv);
+
+#endif /* DMA_RESV_UTILS_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 4fd38101bb56..4d2f40cf237b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -72,6 +72,8 @@
#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
+#include "gt/intel_execlists_submission.h" /* virtual_engine */
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "i915_gem_context.h"
@@ -333,13 +335,12 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
return e;
}
-static void i915_gem_context_free(struct i915_gem_context *ctx)
+void i915_gem_context_release(struct kref *ref)
{
- GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
- spin_lock(&ctx->i915->gem.contexts.lock);
- list_del(&ctx->link);
- spin_unlock(&ctx->i915->gem.contexts.lock);
+ trace_i915_context_free(ctx);
+ GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
mutex_destroy(&ctx->engines_mutex);
mutex_destroy(&ctx->lut_mutex);
@@ -353,37 +354,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
kfree_rcu(ctx, rcu);
}
-static void contexts_free_all(struct llist_node *list)
-{
- struct i915_gem_context *ctx, *cn;
-
- llist_for_each_entry_safe(ctx, cn, list, free_link)
- i915_gem_context_free(ctx);
-}
-
-static void contexts_flush_free(struct i915_gem_contexts *gc)
-{
- contexts_free_all(llist_del_all(&gc->free_list));
-}
-
-static void contexts_free_worker(struct work_struct *work)
-{
- struct i915_gem_contexts *gc =
- container_of(work, typeof(*gc), free_work);
-
- contexts_flush_free(gc);
-}
-
-void i915_gem_context_release(struct kref *ref)
-{
- struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
- struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
-
- trace_i915_context_free(ctx);
- if (llist_add(&ctx->free_link, &gc->free_list))
- schedule_work(&gc->free_work);
-}
-
static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
@@ -438,7 +408,7 @@ __active_engine(struct i915_request *rq, struct intel_engine_cs **active)
}
if (i915_request_is_active(rq)) {
- if (!i915_request_completed(rq))
+ if (!__i915_request_is_complete(rq))
*active = locked;
ret = true;
}
@@ -453,6 +423,9 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
struct intel_engine_cs *engine = NULL;
struct i915_request *rq;
+ if (intel_context_has_inflight(ce))
+ return intel_context_inflight(ce);
+
if (!ce->timeline)
return NULL;
@@ -632,6 +605,10 @@ static void context_close(struct i915_gem_context *ctx)
*/
lut_close(ctx);
+ spin_lock(&ctx->i915->gem.contexts.lock);
+ list_del(&ctx->link);
+ spin_unlock(&ctx->i915->gem.contexts.lock);
+
mutex_unlock(&ctx->mutex);
/*
@@ -740,7 +717,8 @@ err_free:
}
static inline struct i915_gem_engines *
-__context_engines_await(const struct i915_gem_context *ctx)
+__context_engines_await(const struct i915_gem_context *ctx,
+ bool *user_engines)
{
struct i915_gem_engines *engines;
@@ -749,6 +727,10 @@ __context_engines_await(const struct i915_gem_context *ctx)
engines = rcu_dereference(ctx->engines);
GEM_BUG_ON(!engines);
+ if (user_engines)
+ *user_engines = i915_gem_context_user_engines(ctx);
+
+ /* successful await => strong mb */
if (unlikely(!i915_sw_fence_await(&engines->fence)))
continue;
@@ -772,7 +754,7 @@ context_apply_all(struct i915_gem_context *ctx,
struct intel_context *ce;
int err = 0;
- e = __context_engines_await(ctx);
+ e = __context_engines_await(ctx, NULL);
for_each_gem_engine(ce, e, it) {
err = fn(ce, data);
if (err)
@@ -849,9 +831,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
!HAS_EXECLISTS(i915))
return ERR_PTR(-EINVAL);
- /* Reap the stale contexts */
- contexts_flush_free(&i915->gem.contexts);
-
ctx = __create_context(i915);
if (IS_ERR(ctx))
return ctx;
@@ -896,23 +875,11 @@ static void init_contexts(struct i915_gem_contexts *gc)
{
spin_lock_init(&gc->lock);
INIT_LIST_HEAD(&gc->list);
-
- INIT_WORK(&gc->free_work, contexts_free_worker);
- init_llist_head(&gc->free_list);
}
void i915_gem_init__contexts(struct drm_i915_private *i915)
{
init_contexts(&i915->gem.contexts);
- drm_dbg(&i915->drm, "%s context support initialized\n",
- DRIVER_CAPS(i915)->has_logical_contexts ?
- "logical" : "fake");
-}
-
-void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
-{
- flush_work(&i915->gem.contexts.free_work);
- rcu_barrier(); /* and flush the left over RCU frees */
}
static int gem_context_register(struct i915_gem_context *ctx,
@@ -988,7 +955,6 @@ err:
void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_private *i915 = file_priv->dev_priv;
struct i915_address_space *vm;
struct i915_gem_context *ctx;
unsigned long idx;
@@ -1000,8 +966,6 @@ void i915_gem_context_close(struct drm_file *file)
xa_for_each(&file_priv->vm_xa, idx, vm)
i915_vm_put(vm);
xa_destroy(&file_priv->vm_xa);
-
- contexts_flush_free(&i915->gem.contexts);
}
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -1116,7 +1080,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
return err;
}
- e = __context_engines_await(ctx);
+ e = __context_engines_await(ctx, NULL);
if (!e) {
i915_active_release(&cb->base);
return -ENOENT;
@@ -1879,27 +1843,6 @@ replace:
return 0;
}
-static struct i915_gem_engines *
-__copy_engines(struct i915_gem_engines *e)
-{
- struct i915_gem_engines *copy;
- unsigned int n;
-
- copy = alloc_engines(e->num_engines);
- if (!copy)
- return ERR_PTR(-ENOMEM);
-
- for (n = 0; n < e->num_engines; n++) {
- if (e->engines[n])
- copy->engines[n] = intel_context_get(e->engines[n]);
- else
- copy->engines[n] = NULL;
- }
- copy->num_engines = n;
-
- return copy;
-}
-
static int
get_engines(struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
@@ -1907,19 +1850,17 @@ get_engines(struct i915_gem_context *ctx,
struct i915_context_param_engines __user *user;
struct i915_gem_engines *e;
size_t n, count, size;
+ bool user_engines;
int err = 0;
- err = mutex_lock_interruptible(&ctx->engines_mutex);
- if (err)
- return err;
+ e = __context_engines_await(ctx, &user_engines);
+ if (!e)
+ return -ENOENT;
- e = NULL;
- if (i915_gem_context_user_engines(ctx))
- e = __copy_engines(i915_gem_context_engines(ctx));
- mutex_unlock(&ctx->engines_mutex);
- if (IS_ERR_OR_NULL(e)) {
+ if (!user_engines) {
+ i915_sw_fence_complete(&e->fence);
args->size = 0;
- return PTR_ERR_OR_ZERO(e);
+ return 0;
}
count = e->num_engines;
@@ -1970,7 +1911,7 @@ get_engines(struct i915_gem_context *ctx,
args->size = size;
err_free:
- free_engines(e);
+ i915_sw_fence_complete(&e->fence);
return err;
}
@@ -2136,11 +2077,14 @@ static int copy_ring_size(struct intel_context *dst,
static int clone_engines(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
- struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
- struct i915_gem_engines *clone;
+ struct i915_gem_engines *clone, *e;
bool user_engines;
unsigned long n;
+ e = __context_engines_await(src, &user_engines);
+ if (!e)
+ return -ENOENT;
+
clone = alloc_engines(e->num_engines);
if (!clone)
goto err_unlock;
@@ -2182,9 +2126,7 @@ static int clone_engines(struct i915_gem_context *dst,
}
}
clone->num_engines = n;
-
- user_engines = i915_gem_context_user_engines(src);
- i915_gem_context_unlock_engines(src);
+ i915_sw_fence_complete(&e->fence);
/* Serialised by constructor */
engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
@@ -2195,7 +2137,7 @@ static int clone_engines(struct i915_gem_context *dst,
return 0;
err_unlock:
- i915_gem_context_unlock_engines(src);
+ i915_sw_fence_complete(&e->fence);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index a133f92bbedb..b5c908f3f4f2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -110,7 +110,6 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
/* i915_gem_context.c */
void i915_gem_init__contexts(struct drm_i915_private *i915);
-void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index ae14ca24a11f..1449f54924e0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -108,7 +108,6 @@ struct i915_gem_context {
/** link: place with &drm_i915_private.context_list */
struct list_head link;
- struct llist_node free_link;
/**
* @ref: reference count
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
new file mode 100644
index 000000000000..45d60e3d98e3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_region.h"
+
+#include "i915_drv.h"
+
+static int
+i915_gem_create(struct drm_file *file,
+ struct intel_memory_region *mr,
+ u64 *size_p,
+ u32 *handle_p)
+{
+ struct drm_i915_gem_object *obj;
+ u32 handle;
+ u64 size;
+ int ret;
+
+ GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
+ size = round_up(*size_p, mr->min_page_size);
+ if (size == 0)
+ return -EINVAL;
+
+ /* For most of the ABI (e.g. mmap) we think in system pages */
+ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+ /* Allocate the new object */
+ obj = i915_gem_object_create_region(mr, size, 0);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ GEM_BUG_ON(size != obj->base.size);
+
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
+ /* drop reference from allocate - handle holds it now */
+ i915_gem_object_put(obj);
+ if (ret)
+ return ret;
+
+ *handle_p = handle;
+ *size_p = size;
+ return 0;
+}
+
+int
+i915_gem_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ enum intel_memory_type mem_type;
+ int cpp = DIV_ROUND_UP(args->bpp, 8);
+ u32 format;
+
+ switch (cpp) {
+ case 1:
+ format = DRM_FORMAT_C8;
+ break;
+ case 2:
+ format = DRM_FORMAT_RGB565;
+ break;
+ case 4:
+ format = DRM_FORMAT_XRGB8888;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* have to work out size/pitch and return them */
+ args->pitch = ALIGN(args->width * cpp, 64);
+
+ /* align stride to page size so that we can remap */
+ if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
+ DRM_FORMAT_MOD_LINEAR))
+ args->pitch = ALIGN(args->pitch, 4096);
+
+ if (args->pitch < args->width)
+ return -EINVAL;
+
+ args->size = mul_u32_u32(args->pitch, args->height);
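+
+ /*
+ * Worked example (hypothetical numbers): a 1920x1080 dumb buffer at
+ * 32 bpp gives cpp = 4, pitch = ALIGN(1920 * 4, 64) = 7680 bytes
+ * (assuming that fits within the maximum FB stride) and
+ * size = 7680 * 1080 = 8294400 bytes.
+ */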
+
+ mem_type = INTEL_MEMORY_SYSTEM;
+ if (HAS_LMEM(to_i915(dev)))
+ mem_type = INTEL_MEMORY_LOCAL;
+
+ return i915_gem_create(file,
+ intel_memory_region_by_type(to_i915(dev),
+ mem_type),
+ &args->size, &args->handle);
+}
+
+/**
+ * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_create *args = data;
+
+ i915_gem_flush_free_objects(i915);
+
+ return i915_gem_create(file,
+ intel_memory_region_by_type(i915,
+ INTEL_MEMORY_SYSTEM),
+ &args->size, &args->handle);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index fcce6909f201..36f54cedaaeb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -5,6 +5,7 @@
*/
#include "display/intel_frontbuffer.h"
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
@@ -15,13 +16,58 @@
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
+static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ return !(obj->cache_level == I915_CACHE_NONE ||
+ obj->cache_level == I915_CACHE_WT);
+}
+
+static void
+flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+{
+ struct i915_vma *vma;
+
+ assert_object_held(obj);
+
+ if (!(obj->write_domain & flush_domains))
+ return;
+
+ switch (obj->write_domain) {
+ case I915_GEM_DOMAIN_GTT:
+ spin_lock(&obj->vma.lock);
+ for_each_ggtt_vma(vma, obj) {
+ if (i915_vma_unset_ggtt_write(vma))
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
+ }
+ spin_unlock(&obj->vma.lock);
+
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ break;
+
+ case I915_GEM_DOMAIN_WC:
+ wmb();
+ break;
+
+ case I915_GEM_DOMAIN_CPU:
+ i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+ break;
+
+ case I915_GEM_DOMAIN_RENDER:
+ if (gpu_write_needs_clflush(obj))
+ obj->cache_dirty = true;
+ break;
+ }
+
+ obj->write_domain = 0;
+}
+
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
/*
* We manually flush the CPU domain so that we can override and
* force the flush for the display, and perform it asynchronously.
*/
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
if (obj->cache_dirty)
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
obj->write_domain = 0;
@@ -80,7 +126,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
@@ -141,7 +187,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
@@ -370,6 +416,7 @@ retry:
}
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+ i915_vma_mark_scanout(vma);
i915_gem_object_flush_if_display_locked(obj);
@@ -387,48 +434,6 @@ err:
return vma;
}
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_vma *vma;
-
- if (list_empty(&obj->vma.list))
- return;
-
- mutex_lock(&i915->ggtt.vm.mutex);
- spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- }
- spin_unlock(&obj->vma.lock);
- mutex_unlock(&i915->ggtt.vm.mutex);
-
- if (i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
- if (obj->mm.madv == I915_MADV_WILLNEED &&
- !atomic_read(&obj->mm.shrink_pin))
- list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
-
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- }
-}
-
-void
-i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
-{
- /* Bump the LRU to try and avoid premature eviction whilst flipping */
- i915_gem_object_bump_inactive_ggtt(vma->obj);
-
- i915_vma_unpin(vma);
-}
-
/**
* i915_gem_object_set_to_cpu_domain - Moves a single object to the CPU read, and possibly write, domain.
* @obj: object to act on
@@ -451,7 +456,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* Flush the CPU cache if it's still invalid. */
if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
@@ -569,9 +574,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
else
err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
- /* And bump the LRU for this access */
- i915_gem_object_bump_inactive_ggtt(obj);
-
i915_gem_object_unlock(obj);
if (write_domain)
@@ -619,7 +621,7 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
goto out;
}
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* If we're not in the cpu read domain, set ourself into the gtt
* read domain and manually flush cachelines (if required). This
@@ -670,7 +672,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
goto out;
}
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* If we're not in the cpu write domain, set ourself into the
* gtt write domain and manually flush cachelines (as required).
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index bcc80f428172..d70ca36f74f6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -15,6 +15,7 @@
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_pm.h"
@@ -534,8 +535,6 @@ eb_add_vma(struct i915_execbuffer *eb,
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
struct eb_vma *ev = &eb->vma[i];
- GEM_BUG_ON(i915_vma_is_closed(vma));
-
ev->vma = vma;
ev->exec = entry;
ev->flags = entry->flags;
@@ -1046,7 +1045,7 @@ static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cach
GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
- __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(cache->rq->engine->gt);
@@ -1277,7 +1276,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
int err;
if (!pool) {
- pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE);
+ pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE,
+ cache->has_llc ?
+ I915_MAP_WB :
+ I915_MAP_WC);
if (IS_ERR(pool))
return PTR_ERR(pool);
}
@@ -1287,15 +1289,14 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_pool;
- cmd = i915_gem_object_pin_map(pool->obj,
- cache->has_llc ?
- I915_MAP_FORCE_WB :
- I915_MAP_FORCE_WC);
+ cmd = i915_gem_object_pin_map(pool->obj, pool->type);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err_pool;
}
+ memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
+
batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
@@ -2457,7 +2458,8 @@ static int eb_parse(struct i915_execbuffer *eb)
return -EINVAL;
if (!pool) {
- pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
+ pool = intel_gt_get_buffer_pool(eb->engine->gt, len,
+ I915_MAP_WB);
if (IS_ERR(pool))
return PTR_ERR(pool);
eb->batch_pool = pool;
@@ -2533,6 +2535,9 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
{
int err;
+ if (intel_context_nopreempt(eb->context))
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
+
err = eb_move_to_gpu(eb);
if (err)
return err;
@@ -2573,15 +2578,12 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
return err;
}
- if (intel_context_nopreempt(eb->context))
- __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
-
return 0;
}
static int num_vcs_engines(const struct drm_i915_private *i915)
{
- return hweight64(VDBOX_MASK(&i915->gt));
+ return hweight_long(VDBOX_MASK(&i915->gt));
}
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 932ee21e6609..194f35342710 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -31,18 +31,13 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
size, flags);
}
-struct drm_i915_gem_object *
-__i915_gem_lmem_object_create(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
-
- obj = i915_gem_object_alloc();
- if (!obj)
- return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class);
@@ -53,5 +48,5 @@ __i915_gem_lmem_object_create(struct intel_memory_region *mem,
i915_gem_object_init_memory_region(obj, mem, flags);
- return obj;
+ return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index fc3f15580fe3..036d53c01de9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -21,9 +21,9 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
unsigned int flags);
-struct drm_i915_gem_object *
-__i915_gem_lmem_object_create(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags);
+int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags);
#endif /* !__I915_GEM_LMEM_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 00d24000b5e8..70f798405f7f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -25,13 +25,13 @@
#include <linux/sched/mm.h>
#include "display/intel_frontbuffer.h"
-#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
+#include "i915_memcpy.h"
#include "i915_trace.h"
static struct i915_global_object {
@@ -313,52 +313,6 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
queue_work(i915->wq, &i915->mm.free_work);
}
-static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
-{
- return !(obj->cache_level == I915_CACHE_NONE ||
- obj->cache_level == I915_CACHE_WT);
-}
-
-void
-i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
- unsigned int flush_domains)
-{
- struct i915_vma *vma;
-
- assert_object_held(obj);
-
- if (!(obj->write_domain & flush_domains))
- return;
-
- switch (obj->write_domain) {
- case I915_GEM_DOMAIN_GTT:
- spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj) {
- if (i915_vma_unset_ggtt_write(vma))
- intel_gt_flush_ggtt_writes(vma->vm->gt);
- }
- spin_unlock(&obj->vma.lock);
-
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
- break;
-
- case I915_GEM_DOMAIN_WC:
- wmb();
- break;
-
- case I915_GEM_DOMAIN_CPU:
- i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
- break;
-
- case I915_GEM_DOMAIN_RENDER:
- if (gpu_write_needs_clflush(obj))
- obj->cache_dirty = true;
- break;
- }
-
- obj->write_domain = 0;
-}
-
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
@@ -383,6 +337,70 @@ void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
}
}
+static void
+i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
+{
+ void *src_map;
+ void *src_ptr;
+
+ src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));
+
+ src_ptr = src_map + offset_in_page(offset);
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ drm_clflush_virt_range(src_ptr, size);
+ memcpy(dst, src_ptr, size);
+
+ kunmap_atomic(src_map);
+}
+
+static void
+i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
+{
+ void __iomem *src_map;
+ void __iomem *src_ptr;
+ dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);
+
+ src_map = io_mapping_map_wc(&obj->mm.region->iomap,
+ dma - obj->mm.region->region.start,
+ PAGE_SIZE);
+
+ src_ptr = src_map + offset_in_page(offset);
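+ /*
+ * Prefer the accelerated WC read path; i915_memcpy_from_wc() returns
+ * false when it cannot perform the copy, in which case fall back to a
+ * plain memcpy_fromio().
+ */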
+ if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
+ memcpy_fromio(dst, src_ptr, size);
+
+ io_mapping_unmap(src_map);
+}
+
+/**
+ * i915_gem_object_read_from_page - read data from the page of a GEM object
+ * @obj: GEM object to read from
+ * @offset: offset within the object
+ * @dst: buffer to store the read data
+ * @size: size to read
+ *
+ * Reads data from @obj at the specified offset. The requested region to read
+ * from can't cross a page boundary. The caller must ensure that @obj pages
+ * are pinned and that @obj is synced wrt. any related writes.
+ *
+ * Returns 0 on success or -ENODEV if the type of @obj's backing store is
+ * unsupported.
+ */
+int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
+{
+ GEM_BUG_ON(offset >= obj->base.size);
+ GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ if (i915_gem_object_has_struct_page(obj))
+ i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
+ else if (i915_gem_object_has_iomem(obj))
+ i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
+ else
+ return -ENODEV;
+
+ return 0;
+}
+
void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index be14486f63a7..d0ae834d787a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -188,6 +188,24 @@ i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
+{
+ return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
+}
+
+static inline void
+i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
+{
+ set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
+}
+
+static inline void
+i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
+{
+ clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
+}
+
+static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
unsigned long flags)
{
@@ -201,6 +219,12 @@ i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
+}
+
+static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
@@ -384,14 +408,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
-enum i915_map_type {
- I915_MAP_WB = 0,
- I915_MAP_WC,
-#define I915_MAP_OVERRIDE BIT(31)
- I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
- I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
-};
-
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
* @obj: the object to map into kernel address space
@@ -435,10 +451,6 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
-void
-i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
- unsigned int flush_domains);
-
int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
@@ -486,7 +498,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
const struct i915_ggtt_view *view,
unsigned int flags);
-void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
@@ -512,6 +523,9 @@ static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
obj->cache_dirty = true;
}
+void i915_gem_fence_wait_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr);
+
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout);
@@ -540,4 +554,8 @@ i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
__i915_gem_object_invalidate_frontbuffer(obj, origin);
}
+int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);
+
+bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
+
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index aee7ad3cc3c6..d6dac21fce0b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_ring.h"
@@ -34,7 +35,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
count = div_u64(round_up(vma->size, block_size), block_size);
size = (1 + 8 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
+ pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@@ -54,7 +55,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
if (unlikely(err))
goto out_put;
- cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+ cmd = i915_gem_object_pin_map(pool->obj, pool->type);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out_unpin;
@@ -256,7 +257,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
count = div_u64(round_up(dst->size, block_size), block_size);
size = (1 + 11 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
+ pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@@ -276,7 +277,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
if (unlikely(err))
goto out_put;
- cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+ cmd = i915_gem_object_pin_map(pool->obj, pool->type);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out_unpin;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index e2d9b7e1e152..0438e00d4ca7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -67,6 +67,14 @@ struct drm_i915_gem_object_ops {
const char *name; /* friendly name for debug, e.g. lockdep classes */
};
+enum i915_map_type {
+ I915_MAP_WB = 0,
+ I915_MAP_WC,
+#define I915_MAP_OVERRIDE BIT(31)
+ I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
+ I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
+};
+
enum i915_mmap_type {
I915_MMAP_TYPE_GTT = 0,
I915_MMAP_TYPE_WC,
@@ -142,8 +150,6 @@ struct drm_i915_gem_object {
*/
struct list_head obj_link;
- /** Stolen memory for this object, instead of being backed by shmem. */
- struct drm_mm_node *stolen;
union {
struct rcu_head rcu;
struct llist_node freed;
@@ -167,6 +173,7 @@ struct drm_i915_gem_object {
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
#define I915_BO_READONLY BIT(2)
+#define I915_TILING_QUIRK_BIT 3 /* unknown swizzling; do not release! */
/*
* Is the object to be mapped as read-only to the GPU
@@ -275,12 +282,6 @@ struct drm_i915_gem_object {
* pages were last acquired.
*/
bool dirty:1;
-
- /**
- * This is set if the object has been pinned due to unknown
- * swizzling.
- */
- bool quirked:1;
} mm;
/** Record of address bit 17 of each page at last unbind. */
@@ -295,6 +296,8 @@ struct drm_i915_gem_object {
struct work_struct *work;
} userptr;
+ struct drm_mm_node *stolen;
+
unsigned long scratch;
u64 encode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index e2c7b2a7895f..43028f3539a6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -16,6 +16,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ bool shrinkable;
int i;
lockdep_assert_held(&obj->mm.lock);
@@ -38,13 +39,6 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
obj->mm.pages = pages;
- if (i915_gem_object_is_tiled(obj) &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
- }
-
GEM_BUG_ON(!sg_page_sizes);
obj->mm.page_sizes.phys = sg_page_sizes;
@@ -63,7 +57,16 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
}
GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
- if (i915_gem_object_is_shrinkable(obj)) {
+ shrinkable = i915_gem_object_is_shrinkable(obj);
+
+ if (i915_gem_object_is_tiled(obj) &&
+ i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
+ shrinkable = false;
+ }
+
+ if (shrinkable) {
struct list_head *list;
unsigned long flags;
@@ -238,7 +241,7 @@ unlock:
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
- enum i915_map_type type)
+ enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
struct page *stack[32], **pages = stack, *page;
@@ -281,7 +284,7 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
/* Too big for stack -- allocate temporary array instead */
pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
if (!pages)
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
i = 0;
@@ -290,11 +293,12 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
vaddr = vmap(pages, n_pages, 0, pgprot);
if (pages != stack)
kvfree(pages);
- return vaddr;
+
+ return vaddr ?: ERR_PTR(-ENOMEM);
}
static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
- enum i915_map_type type)
+ enum i915_map_type type)
{
resource_size_t iomap = obj->mm.region->iomap.base -
obj->mm.region->region.start;
@@ -305,13 +309,13 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
void *vaddr;
if (type != I915_MAP_WC)
- return NULL;
+ return ERR_PTR(-ENODEV);
if (n_pfn > ARRAY_SIZE(stack)) {
/* Too big for stack -- allocate temporary array instead */
pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
if (!pfns)
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
i = 0;
@@ -320,7 +324,8 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
if (pfns != stack)
kvfree(pfns);
- return vaddr;
+
+ return vaddr ?: ERR_PTR(-ENOMEM);
}
/* get, pin, and map the pages of the object into kernel space */
@@ -349,8 +354,10 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
- if (err)
- goto err_unlock;
+ if (err) {
+ ptr = ERR_PTR(err);
+ goto out_unlock;
+ }
smp_mb__before_atomic();
}
@@ -362,7 +369,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
if (ptr && has_type != type) {
if (pinned) {
- err = -EBUSY;
+ ptr = ERR_PTR(-EBUSY);
goto err_unpin;
}
@@ -374,15 +381,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
if (!ptr) {
if (GEM_WARN_ON(type == I915_MAP_WC &&
!static_cpu_has(X86_FEATURE_PAT)))
- ptr = NULL;
+ ptr = ERR_PTR(-ENODEV);
else if (i915_gem_object_has_struct_page(obj))
ptr = i915_gem_object_map_page(obj, type);
else
ptr = i915_gem_object_map_pfn(obj, type);
- if (!ptr) {
- err = -ENOMEM;
+ if (IS_ERR(ptr))
goto err_unpin;
- }
obj->mm.mapping = page_pack_bits(ptr, type);
}
@@ -393,8 +398,6 @@ out_unlock:
err_unpin:
atomic_dec(&obj->mm.pages_pin_count);
-err_unlock:
- ptr = ERR_PTR(err);
goto out_unlock;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 3a4dfe2ef1da..3c0b157e2a35 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -213,7 +213,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
if (obj->ops == &i915_gem_phys_ops)
return 0;
- if (obj->ops != &i915_gem_shmem_ops)
+ if (!i915_gem_object_is_shmem(obj))
return -EINVAL;
err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
@@ -227,7 +227,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
goto err_unlock;
}
- if (obj->mm.quirked) {
+ if (i915_gem_object_has_tiling_quirk(obj)) {
err = -EFAULT;
goto err_unlock;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 40d3e40500fa..215766cc22bf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -11,6 +11,13 @@
#include "i915_drv.h"
+#if defined(CONFIG_X86)
+#include <asm/smp.h>
+#else
+#define wbinvd_on_all_cpus() \
+ pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__)
+#endif
+
void i915_gem_suspend(struct drm_i915_private *i915)
{
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
@@ -32,13 +39,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
i915_gem_drain_freed_objects(i915);
}
-static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
-{
- return list_first_entry_or_null(list,
- struct drm_i915_gem_object,
- mm.link);
-}
-
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
@@ -48,6 +48,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
NULL
}, **phase;
unsigned long flags;
+ bool flush = false;
/*
* Neither the BIOS, ourselves or any other kernel
@@ -73,29 +74,15 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
spin_lock_irqsave(&i915->mm.obj_lock, flags);
for (phase = phases; *phase; phase++) {
- LIST_HEAD(keep);
-
- while ((obj = first_mm_object(*phase))) {
- list_move_tail(&obj->mm.link, &keep);
-
- /* Beware the background _i915_gem_free_objects */
- if (!kref_get_unless_zero(&obj->base.refcount))
- continue;
-
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-
- i915_gem_object_lock(obj, NULL);
- drm_WARN_ON(&i915->drm,
- i915_gem_object_set_to_gtt_domain(obj, false));
- i915_gem_object_unlock(obj);
- i915_gem_object_put(obj);
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ list_for_each_entry(obj, *phase, mm.link) {
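+ /*
+ * Objects that are not coherent for CPU reads and are not
+ * already in the CPU domain would each need a clflush before
+ * being written by the CPU; note that and issue a single
+ * wbinvd_on_all_cpus() below instead.
+ */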
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0;
+ __start_cpu_write(obj); /* presume auto-hibernate */
}
-
- list_splice_tail(&keep, *phase);
}
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ if (flush)
+ wbinvd_on_all_cpus();
}
void i915_gem_resume(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 1515384d7e0e..3e3dad22a683 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -22,6 +22,7 @@ i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
int
i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
{
+ const u64 max_segment = i915_sg_segment_size();
struct intel_memory_region *mem = obj->mm.region;
struct list_head *blocks = &obj->mm.blocks;
resource_size_t size = obj->base.size;
@@ -37,7 +38,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
if (!st)
return -ENOMEM;
- if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
+ if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) {
kfree(st);
return -ENOMEM;
}
@@ -64,27 +65,30 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
i915_buddy_block_size(&mem->mm, block));
offset = i915_buddy_block_offset(block);
- GEM_BUG_ON(overflows_type(block_size, sg->length));
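+ /*
+ * Split each buddy block into sg segments no larger than
+ * max_segment, coalescing with the previous segment while the
+ * blocks are physically contiguous.
+ */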
+ while (block_size) {
+ u64 len;
- if (offset != prev_end ||
- add_overflows_t(typeof(sg->length), sg->length, block_size)) {
- if (st->nents) {
- sg_page_sizes |= sg->length;
- sg = __sg_next(sg);
+ if (offset != prev_end || sg->length >= max_segment) {
+ if (st->nents) {
+ sg_page_sizes |= sg->length;
+ sg = __sg_next(sg);
+ }
+
+ sg_dma_address(sg) = mem->region.start + offset;
+ sg_dma_len(sg) = 0;
+ sg->length = 0;
+ st->nents++;
}
- sg_dma_address(sg) = mem->region.start + offset;
- sg_dma_len(sg) = block_size;
+ len = min(block_size, max_segment - sg->length);
+ sg->length += len;
+ sg_dma_len(sg) += len;
- sg->length = block_size;
+ offset += len;
+ block_size -= len;
- st->nents++;
- } else {
- sg->length += block_size;
- sg_dma_len(sg) += block_size;
+ prev_end = offset;
}
-
- prev_end = offset + block_size;
}
sg_page_sizes |= sg->length;
@@ -139,6 +143,7 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
unsigned int flags)
{
struct drm_i915_gem_object *obj;
+ int err;
/*
* NB: Our use of resource_size_t for the size stems from using struct
@@ -169,9 +174,18 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = mem->ops->create_object(mem, size, flags);
- if (!IS_ERR(obj))
- trace_i915_gem_object_create(obj);
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+ err = mem->ops->init_object(mem, obj, size, flags);
+ if (err)
+ goto err_object_free;
+
+ trace_i915_gem_object_create(obj);
return obj;
+
+err_object_free:
+ i915_gem_object_free(obj);
+ return ERR_PTR(err);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 75e8b71c18b9..cf83c208688c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -464,26 +464,21 @@ static int __create_shmem(struct drm_i915_private *i915,
return 0;
}
-static struct drm_i915_gem_object *
-create_shmem(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+static int shmem_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
struct address_space *mapping;
unsigned int cache_level;
gfp_t mask;
int ret;
- obj = i915_gem_object_alloc();
- if (!obj)
- return ERR_PTR(-ENOMEM);
-
ret = __create_shmem(i915, &obj->base, size);
if (ret)
- goto fail;
+ return ret;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
if (IS_I965GM(i915) || IS_I965G(i915)) {
@@ -522,11 +517,7 @@ create_shmem(struct intel_memory_region *mem,
i915_gem_object_init_memory_region(obj, mem, 0);
- return obj;
-
-fail:
- i915_gem_object_free(obj);
- return ERR_PTR(ret);
+ return 0;
}
struct drm_i915_gem_object *
@@ -611,7 +602,7 @@ static void release_shmem(struct intel_memory_region *mem)
static const struct intel_memory_region_ops shmem_region_ops = {
.init = init_shmem,
.release = release_shmem,
- .create_object = create_shmem,
+ .init_object = shmem_object_init,
};
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
@@ -621,3 +612,8 @@ struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
PAGE_SIZE, 0,
&shmem_region_ops);
}
+
+bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops == &i915_gem_shmem_ops;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index dc8f052a0ffe..c2dba1cd9532 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -15,6 +15,7 @@
#include "gt/intel_gt_requests.h"
+#include "dma_resv_utils.h"
#include "i915_trace.h"
static bool swap_available(void)
@@ -209,6 +210,8 @@ i915_gem_shrink(struct drm_i915_private *i915,
mutex_unlock(&obj->mm.lock);
}
+ dma_resv_prune(obj->base.resv);
+
scanned += obj->base.size >> PAGE_SHIFT;
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 29bffc6afcc1..551935348ad8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -608,11 +608,10 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
GEM_BUG_ON(!stolen);
-
- i915_gem_object_release_memory_region(obj);
-
i915_gem_stolen_remove_node(i915, stolen);
kfree(stolen);
+
+ i915_gem_object_release_memory_region(obj);
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -622,18 +621,13 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.release = i915_gem_object_release_stolen,
};
-static struct drm_i915_gem_object *
-__i915_gem_object_create_stolen(struct intel_memory_region *mem,
- struct drm_mm_node *stolen)
+static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ struct drm_mm_node *stolen)
{
static struct lock_class_key lock_class;
- struct drm_i915_gem_object *obj;
unsigned int cache_level;
- int err = -ENOMEM;
-
- obj = i915_gem_object_alloc();
- if (!obj)
- goto err;
+ int err;
drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
@@ -645,55 +639,47 @@ __i915_gem_object_create_stolen(struct intel_memory_region *mem,
err = i915_gem_object_pin_pages(obj);
if (err)
- goto cleanup;
+ return err;
i915_gem_object_init_memory_region(obj, mem, 0);
- return obj;
-
-cleanup:
- i915_gem_object_free(obj);
-err:
- return ERR_PTR(err);
+ return 0;
}
-static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
if (!drm_mm_initialized(&i915->mm.stolen))
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
if (size == 0)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
- if (ret) {
- obj = ERR_PTR(ret);
+ if (ret)
goto err_free;
- }
- obj = __i915_gem_object_create_stolen(mem, stolen);
- if (IS_ERR(obj))
+ ret = __i915_gem_object_create_stolen(mem, obj, stolen);
+ if (ret)
goto err_remove;
- return obj;
+ return 0;
err_remove:
i915_gem_stolen_remove_node(i915, stolen);
err_free:
kfree(stolen);
- return obj;
+ return ret;
}
struct drm_i915_gem_object *
@@ -723,7 +709,7 @@ static void release_stolen(struct intel_memory_region *mem)
static const struct intel_memory_region_ops i915_region_stolen_ops = {
.init = init_stolen,
.release = release_stolen,
- .create_object = _i915_gem_object_create_stolen,
+ .init_object = _i915_gem_object_stolen_init,
};
struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
@@ -772,16 +758,31 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
goto err_free;
}
- obj = __i915_gem_object_create_stolen(mem, stolen);
- if (IS_ERR(obj))
+ obj = i915_gem_object_alloc();
+ if (!obj) {
+ obj = ERR_PTR(-ENOMEM);
goto err_stolen;
+ }
+
+ ret = __i915_gem_object_create_stolen(mem, obj, stolen);
+ if (ret) {
+ obj = ERR_PTR(ret);
+ goto err_object_free;
+ }
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
return obj;
+err_object_free:
+ i915_gem_object_free(obj);
err_stolen:
i915_gem_stolen_remove_node(i915, stolen);
err_free:
kfree(stolen);
return obj;
}
+
+bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops == &i915_gem_object_stolen_ops;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index 61e028063f9f..b03489706796 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -30,6 +30,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
resource_size_t stolen_offset,
resource_size_t size);
+bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
+
#define I915_GEM_STOLEN_BIAS SZ_128K
#endif /* __I915_GEM_STOLEN_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index ffcaee74a249..d589d3d81085 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -270,14 +270,14 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
obj->mm.madv == I915_MADV_WILLNEED &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (tiling == I915_TILING_NONE) {
- GEM_BUG_ON(!obj->mm.quirked);
- __i915_gem_object_unpin_pages(obj);
- obj->mm.quirked = false;
+ GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_clear_tiling_quirk(obj);
+ i915_gem_object_make_shrinkable(obj);
}
if (!i915_gem_object_is_tiled(obj)) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_make_unshrinkable(obj);
+ i915_gem_object_set_tiling_quirk(obj);
}
}
mutex_unlock(&obj->mm.lock);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 8af55cd3e690..4b9856d5ba14 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -5,10 +5,12 @@
*/
#include <linux/dma-fence-array.h>
+#include <linux/dma-fence-chain.h>
#include <linux/jiffies.h>
#include "gt/intel_engine.h"
+#include "dma_resv_utils.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
@@ -43,8 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
unsigned int count, i;
int ret;
- ret = dma_resv_get_fences_rcu(resv,
- &excl, &count, &shared);
+ ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
if (ret)
return ret;
@@ -84,17 +85,14 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
* Opportunistically prune the fences iff we know they have *all* been
* signaled.
*/
- if (prune_fences && dma_resv_trylock(resv)) {
- if (dma_resv_test_signaled_rcu(resv, true))
- dma_resv_add_excl_fence(resv, NULL);
- dma_resv_unlock(resv);
- }
+ if (prune_fences)
+ dma_resv_prune(resv);
return timeout;
}
-static void __fence_set_priority(struct dma_fence *fence,
- const struct i915_sched_attr *attr)
+static void fence_set_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
{
struct i915_request *rq;
struct intel_engine_cs *engine;
@@ -105,27 +103,47 @@ static void __fence_set_priority(struct dma_fence *fence,
rq = to_request(fence);
engine = rq->engine;
- local_bh_disable();
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule)
engine->schedule(rq, attr);
rcu_read_unlock();
- local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}
-static void fence_set_priority(struct dma_fence *fence,
- const struct i915_sched_attr *attr)
+static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
{
+ return fence->ops == &dma_fence_chain_ops;
+}
+
+void i915_gem_fence_wait_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
+{
+ if (dma_fence_is_signaled(fence))
+ return;
+
+ local_bh_disable();
+
/* Recurse once into a fence-array */
if (dma_fence_is_array(fence)) {
struct dma_fence_array *array = to_dma_fence_array(fence);
int i;
for (i = 0; i < array->num_fences; i++)
- __fence_set_priority(array->fences[i], attr);
+ fence_set_priority(array->fences[i], attr);
+ } else if (__dma_fence_is_chain(fence)) {
+ struct dma_fence *iter;
+
+ /* The chain is ordered; if we boost the last, we boost all */
+ dma_fence_chain_for_each(iter, fence) {
+ fence_set_priority(to_dma_fence_chain(iter)->fence,
+ attr);
+ break;
+ }
+ dma_fence_put(iter);
} else {
- __fence_set_priority(fence, attr);
+ fence_set_priority(fence, attr);
}
+
+ local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}
int
@@ -141,12 +159,12 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
int ret;
ret = dma_resv_get_fences_rcu(obj->base.resv,
- &excl, &count, &shared);
+ &excl, &count, &shared);
if (ret)
return ret;
for (i = 0; i < count; i++) {
- fence_set_priority(shared[i], attr);
+ i915_gem_fence_wait_priority(shared[i], attr);
dma_fence_put(shared[i]);
}
@@ -156,7 +174,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
}
if (excl) {
- fence_set_priority(excl, attr);
+ i915_gem_fence_wait_priority(excl, attr);
dma_fence_put(excl);
}
return 0;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index a768ec61e966..2fb501a78a85 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -27,7 +27,7 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
static int huge_get_pages(struct drm_i915_gem_object *obj)
{
-#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL)
const unsigned long nreal = obj->scratch / PAGE_SIZE;
const unsigned long npages = obj->base.size / PAGE_SIZE;
struct scatterlist *sg, *src, *end;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 1f35e71429b4..aacf4856ccb4 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -368,6 +368,27 @@ static int igt_check_page_sizes(struct i915_vma *vma)
err = -EINVAL;
}
+ /*
+ * The dma-api is like a box of chocolates when it comes to the
+ * alignment of dma addresses, however for LMEM we have total control
+ * and so can guarantee alignment, likewise when we allocate our blocks
+ * they should appear in descending order, and if we know that we align
+ * to the largest page size for the GTT address, we should be able to
+ * assert that if we see 2M physical pages then we should also get 2M
+ * GTT pages. If we don't then something might be wrong in our
+ * construction of the backing pages.
+ *
+ * Maintaining alignment is required to utilise huge pages in the ppGTT.
+ */
+ if (i915_gem_object_is_lmem(obj) &&
+ IS_ALIGNED(vma->node.start, SZ_2M) &&
+ vma->page_sizes.sg & SZ_2M &&
+ vma->page_sizes.gtt < SZ_2M) {
+ pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
+ vma->page_sizes.sg, vma->page_sizes.gtt);
+ err = -EINVAL;
+ }
+
if (obj->mm.page_sizes.gtt) {
pr_err("obj->page_sizes.gtt(%u) should never be set\n",
obj->mm.page_sizes.gtt);
@@ -1333,6 +1354,7 @@ static int igt_ppgtt_sanity_check(void *arg)
unsigned int flags;
} backends[] = {
{ igt_create_system, 0, },
+ { igt_create_local, 0, },
{ igt_create_local, I915_BO_ALLOC_CONTIGUOUS, },
};
struct {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 4e36d4897ea6..6a674a7994df 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -20,13 +20,11 @@ static int __igt_client_fill(struct intel_engine_cs *engine)
{
struct intel_context *ce = engine->kernel_context;
struct drm_i915_gem_object *obj;
- struct rnd_state prng;
+ I915_RND_STATE(prng);
IGT_TIMEOUT(end);
u32 *vaddr;
int err = 0;
- prandom_seed_state(&prng, i915_selftest.random_seed);
-
intel_engine_pm_get(engine);
do {
const u32 max_block_size = S16_MAX * PAGE_SIZE;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 7049a6bbc03d..1117d2a44518 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index d27d87a678c8..d429c7643ff2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index e21b5023ca7d..d6783061bc72 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -9,6 +9,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "i915_vma.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
index 174a24553322..d4f4452ce5ed 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
+#include "intel_gt_pm.h"
#include "intel_llc.h"
#include "intel_rc6.h"
#include "intel_rps.h"
@@ -403,34 +404,34 @@ static int frequency_show(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
rpcurupei,
intel_gt_pm_interval_to_ns(gt, rpcurupei));
- seq_printf(m, "RP CUR UP: %d (%dns)\n",
+ seq_printf(m, "RP CUR UP: %d (%lldns)\n",
rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%dns)\n",
+ seq_printf(m, "RP PREV UP: %d (%lldns)\n",
rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
rps->power.up_threshold);
- seq_printf(m, "RP UP EI: %d (%dns)\n",
+ seq_printf(m, "RP UP EI: %d (%lldns)\n",
rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
- seq_printf(m, "RP UP THRESHOLD: %d (%dns)\n",
+ seq_printf(m, "RP UP THRESHOLD: %d (%lldns)\n",
rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));
- seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
rpcurdownei,
intel_gt_pm_interval_to_ns(gt, rpcurdownei));
- seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
rpcurdown,
intel_gt_pm_interval_to_ns(gt, rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+ seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
rpprevdown,
intel_gt_pm_interval_to_ns(gt, rpprevdown));
seq_printf(m, "Down threshold: %d%%\n",
rps->power.down_threshold);
- seq_printf(m, "RP DOWN EI: %d (%dns)\n",
+ seq_printf(m, "RP DOWN EI: %d (%lldns)\n",
rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
- seq_printf(m, "RP DOWN THRESHOLD: %d (%dns)\n",
+ seq_printf(m, "RP DOWN THRESHOLD: %d (%lldns)\n",
rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));
max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
@@ -558,7 +559,9 @@ static int rps_boost_show(struct seq_file *m, void *data)
seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
- seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
+ seq_printf(m, "GPU busy? %s, %llums\n",
+ yesno(gt->awake),
+ ktime_to_ms(intel_gt_get_awake_time(gt)));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
@@ -575,7 +578,7 @@ static int rps_boost_show(struct seq_file *m, void *data)
intel_gpu_freq(rps, rps->efficient_freq),
intel_gpu_freq(rps, rps->boost_freq));
- seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
+ seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
struct intel_uncore *uncore = gt->uncore;
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 680bd9442eb0..e08dff376339 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -12,9 +12,9 @@
#include "intel_gt.h"
/* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
- const unsigned int pde,
- const struct i915_page_table *pt)
+static void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
+ const unsigned int pde,
+ const struct i915_page_table *pt)
{
dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);
@@ -27,8 +27,6 @@ void gen7_ppgtt_enable(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
u32 ecochk;
intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
@@ -41,13 +39,6 @@ void gen7_ppgtt_enable(struct intel_gt *gt)
ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
}
intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
-
- for_each_engine(engine, gt, id) {
- /* GFX_MODE is per-ring on gen7+ */
- ENGINE_WRITE(engine,
- RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- }
}
void gen6_ppgtt_enable(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index d93d85cd3027..8551e6de50e8 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -7,8 +7,6 @@
#include "i915_drv.h"
#include "intel_gpu_commands.h"
-#define MAX_URB_ENTRIES 64
-#define STATE_SIZE (4 * 1024)
#define GT3_INLINE_DATA_DELAYS 0x1E00
#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
@@ -34,38 +32,59 @@ struct batch_chunk {
};
struct batch_vals {
- u32 max_primitives;
- u32 max_urb_entries;
- u32 cmd_size;
- u32 state_size;
+ u32 max_threads;
u32 state_start;
- u32 batch_size;
+ u32 surface_start;
u32 surface_height;
u32 surface_width;
- u32 scratch_size;
- u32 max_size;
+ u32 size;
};
+static int num_primitives(const struct batch_vals *bv)
+{
+ /*
+ * We need to saturate the GPU with work in order to dispatch
+ * a shader on every HW thread, and clear the thread-local registers.
+ * In short, we have to dispatch work faster than the shaders can
+ * run in order to fill the EU and occupy each HW thread.
+ */
+ return bv->max_threads;
+}
+
static void
batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
{
if (IS_HASWELL(i915)) {
- bv->max_primitives = 280;
- bv->max_urb_entries = MAX_URB_ENTRIES;
+ switch (INTEL_INFO(i915)->gt) {
+ default:
+ case 1:
+ bv->max_threads = 70;
+ break;
+ case 2:
+ bv->max_threads = 140;
+ break;
+ case 3:
+ bv->max_threads = 280;
+ break;
+ }
bv->surface_height = 16 * 16;
bv->surface_width = 32 * 2 * 16;
} else {
- bv->max_primitives = 128;
- bv->max_urb_entries = MAX_URB_ENTRIES / 2;
+ switch (INTEL_INFO(i915)->gt) {
+ default:
+ case 1: /* including vlv */
+ bv->max_threads = 36;
+ break;
+ case 2:
+ bv->max_threads = 128;
+ break;
+ }
bv->surface_height = 16 * 8;
bv->surface_width = 32 * 16;
}
- bv->cmd_size = bv->max_primitives * 4096;
- bv->state_size = STATE_SIZE;
- bv->state_start = bv->cmd_size;
- bv->batch_size = bv->cmd_size + bv->state_size;
- bv->scratch_size = bv->surface_height * bv->surface_width;
- bv->max_size = bv->batch_size + bv->scratch_size;
+ bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
+ bv->surface_start = bv->state_start + SZ_4K;
+ bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
}
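/*
 * Worked example (illustration only), using the Haswell GT3 values from
 * batch_get_defaults() above: num_primitives() == max_threads == 280, so
 *
 *	state_start   = round_up(1024 + 280 * 64, 4096)   = 20480
 *	surface_start = 20480 + 4096                       = 24576
 *	size          = 24576 + (16 * 16) * (32 * 2 * 16)  = 286720
 *
 * i.e. the first 20 KiB holds the commands, one 4 KiB page holds the
 * kernel state, and the remaining 256 KiB is the scratch surface.
 */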
static void batch_init(struct batch_chunk *bc,
@@ -155,7 +174,8 @@ static u32
gen7_fill_binding_table(struct batch_chunk *state,
const struct batch_vals *bv)
{
- u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
+ u32 surface_start =
+ gen7_fill_surface_state(state, bv->surface_start, bv);
u32 *cs = batch_alloc_items(state, 32, 8);
u32 offset = batch_offset(state, cs);
@@ -214,9 +234,9 @@ static void
gen7_emit_state_base_address(struct batch_chunk *batch,
u32 surface_state_base)
{
- u32 *cs = batch_alloc_items(batch, 0, 12);
+ u32 *cs = batch_alloc_items(batch, 0, 10);
- *cs++ = STATE_BASE_ADDRESS | (12 - 2);
+ *cs++ = STATE_BASE_ADDRESS | (10 - 2);
/* general */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
/* surface */
@@ -233,8 +253,6 @@ gen7_emit_state_base_address(struct batch_chunk *batch,
*cs++ = BASE_ADDRESS_MODIFY;
*cs++ = 0;
*cs++ = BASE_ADDRESS_MODIFY;
- *cs++ = 0;
- *cs++ = 0;
batch_advance(batch, cs);
}
@@ -244,8 +262,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
u32 urb_size, u32 curbe_size,
u32 mode)
{
- u32 urb_entries = bv->max_urb_entries;
- u32 threads = bv->max_primitives - 1;
+ u32 threads = bv->max_threads - 1;
u32 *cs = batch_alloc_items(batch, 32, 8);
*cs++ = MEDIA_VFE_STATE | (8 - 2);
@@ -254,7 +271,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
*cs++ = 0;
/* number of threads & urb entries for GPGPU vs Media Mode */
- *cs++ = threads << 16 | urb_entries << 8 | mode << 2;
+ *cs++ = threads << 16 | 1 << 8 | mode << 2;
*cs++ = 0;
@@ -293,17 +310,12 @@ gen7_emit_media_object(struct batch_chunk *batch,
{
unsigned int x_offset = (media_object_index % 16) * 64;
unsigned int y_offset = (media_object_index / 16) * 16;
- unsigned int inline_data_size;
- unsigned int media_batch_size;
- unsigned int i;
+ unsigned int pkt = 6 + 3;
u32 *cs;
- inline_data_size = 112 * 8;
- media_batch_size = inline_data_size + 6;
+ cs = batch_alloc_items(batch, 8, pkt);
- cs = batch_alloc_items(batch, 8, media_batch_size);
-
- *cs++ = MEDIA_OBJECT | (media_batch_size - 2);
+ *cs++ = MEDIA_OBJECT | (pkt - 2);
/* interface descriptor offset */
*cs++ = 0;
@@ -317,25 +329,46 @@ gen7_emit_media_object(struct batch_chunk *batch,
*cs++ = 0;
/* inline */
- *cs++ = (y_offset << 16) | (x_offset);
+ *cs++ = y_offset << 16 | x_offset;
*cs++ = 0;
*cs++ = GT3_INLINE_DATA_DELAYS;
- for (i = 3; i < inline_data_size; i++)
- *cs++ = 0;
batch_advance(batch, cs);
}
static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
{
- u32 *cs = batch_alloc_items(batch, 0, 5);
+ u32 *cs = batch_alloc_items(batch, 0, 4);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ batch_advance(batch, cs);
+}
+
+static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 10);
+
+ /* ivb: Stall before STATE_CACHE_INVALIDATE */
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
+ PIPE_CONTROL_CS_STALL;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
*cs++ = GFX_OP_PIPE_CONTROL(5);
- *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
- PIPE_CONTROL_GLOBAL_GTT_IVB;
+ *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
+
batch_advance(batch, cs);
}
@@ -344,34 +377,47 @@ static void emit_batch(struct i915_vma * const vma,
const struct batch_vals *bv)
{
struct drm_i915_private *i915 = vma->vm->i915;
- unsigned int desc_count = 64;
- const u32 urb_size = 112;
+ const unsigned int desc_count = 1;
+ const unsigned int urb_size = 1;
struct batch_chunk cmds, state;
- u32 interface_descriptor;
+ u32 descriptors;
unsigned int i;
- batch_init(&cmds, vma, start, 0, bv->cmd_size);
- batch_init(&state, vma, start, bv->state_start, bv->state_size);
-
- interface_descriptor =
- gen7_fill_interface_descriptor(&state, bv,
- IS_HASWELL(i915) ?
- &cb_kernel_hsw :
- &cb_kernel_ivb,
- desc_count);
+ batch_init(&cmds, vma, start, 0, bv->state_start);
+ batch_init(&state, vma, start, bv->state_start, SZ_4K);
+
+ descriptors = gen7_fill_interface_descriptor(&state, bv,
+ IS_HASWELL(i915) ?
+ &cb_kernel_hsw :
+ &cb_kernel_ivb,
+ desc_count);
+
+ /* Reset inherited context registers */
+ gen7_emit_pipeline_invalidate(&cmds);
+ batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
+ batch_add(&cmds, 0xffff0000);
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
+ batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+ gen7_emit_pipeline_invalidate(&cmds);
gen7_emit_pipeline_flush(&cmds);
+
+ /* Switch to the media pipeline and our base address */
+ gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
batch_add(&cmds, MI_NOOP);
- gen7_emit_state_base_address(&cmds, interface_descriptor);
+ gen7_emit_pipeline_invalidate(&cmds);
+
gen7_emit_pipeline_flush(&cmds);
+ gen7_emit_state_base_address(&cmds, descriptors);
+ gen7_emit_pipeline_invalidate(&cmds);
+ /* Set the clear-residual kernel state */
gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
+ gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
- gen7_emit_interface_descriptor_load(&cmds,
- interface_descriptor,
- desc_count);
-
- for (i = 0; i < bv->max_primitives; i++)
+ /* Execute the kernel on all HW threads */
+ for (i = 0; i < num_primitives(bv); i++)
gen7_emit_media_object(&cmds, i);
batch_add(&cmds, MI_BATCH_BUFFER_END);
@@ -385,15 +431,15 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
batch_get_defaults(engine->i915, &bv);
if (!vma)
- return bv.max_size;
+ return bv.size;
- GEM_BUG_ON(vma->obj->base.size < bv.max_size);
+ GEM_BUG_ON(vma->obj->base.size < bv.size);
batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(batch))
return PTR_ERR(batch);
- emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
+ emit_batch(vma, memset(batch, 0, bv.size), &bv);
i915_gem_object_flush_map(vma->obj);
__i915_gem_object_release_map(vma->obj);
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
new file mode 100644
index 000000000000..07ba524da90b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -0,0 +1,635 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#include "gen8_engine_cs.h"
+#include "i915_drv.h"
+#include "intel_lrc.h"
+#include "intel_gpu_commands.h"
+#include "intel_ring.h"
+
+int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ bool vf_flush_wa = false, dc_flush_wa = false;
+ u32 *cs, flags = 0;
+ int len;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ if (mode & EMIT_FLUSH) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ /*
+ * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
+ * pipe control.
+ */
+ if (IS_GEN(rq->engine->i915, 9))
+ vf_flush_wa = true;
+
+ /* WaForGAMHang:kbl */
+ if (IS_KBL_GT_REVID(rq->engine->i915, 0, KBL_REVID_B0))
+ dc_flush_wa = true;
+ }
+
+ len = 6;
+
+ if (vf_flush_wa)
+ len += 6;
+
+ if (dc_flush_wa)
+ len += 12;
+
+ cs = intel_ring_begin(rq, len);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ if (vf_flush_wa)
+ cs = gen8_emit_pipe_control(cs, 0, 0);
+
+ if (dc_flush_wa)
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
+ 0);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+ if (dc_flush_wa)
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
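/*
 * Illustration only: when both workarounds fire (the gen9 NULL VF flush
 * and the KBL dc_flush_wa), the intel_ring_begin() reservation above is
 * 6 + 6 + 12 = 24 dwords: a NULL pipe control, a DC-flush pipe control,
 * the main flush and a trailing CS-stall pipe control, each 6 dwords.
 */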
+
+int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
+{
+ u32 cmd, *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cmd = MI_FLUSH_DW + 1;
+
+ /*
+ * We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ if (mode & EMIT_INVALIDATE) {
+ cmd |= MI_INVALIDATE_TLB;
+ if (rq->engine->class == VIDEO_DECODE_CLASS)
+ cmd |= MI_INVALIDATE_BSD;
+ }
+
+ *cs++ = cmd;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ if (mode & EMIT_FLUSH) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(rq, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(rq, cs);
+ }
+
+ return 0;
+}
+
+static u32 preparser_disable(bool state)
+{
+ return MI_ARB_CHECK | 1 << 8 | state;
+}
+
+static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine)
+{
+ static const i915_reg_t vd[] = {
+ GEN12_VD0_AUX_NV,
+ GEN12_VD1_AUX_NV,
+ GEN12_VD2_AUX_NV,
+ GEN12_VD3_AUX_NV,
+ };
+
+ static const i915_reg_t ve[] = {
+ GEN12_VE0_AUX_NV,
+ GEN12_VE1_AUX_NV,
+ };
+
+ if (engine->class == VIDEO_DECODE_CLASS)
+ return vd[engine->instance];
+
+ if (engine->class == VIDEO_ENHANCEMENT_CLASS)
+ return ve[engine->instance];
+
+ GEM_BUG_ON("unknown aux_inv reg\n");
+ return INVALID_MMIO_REG;
+}
+
+static u32 *gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(inv_reg);
+ *cs++ = AUX_INV;
+ *cs++ = MI_NOOP;
+
+ return cs;
+}
+
+int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ if (mode & EMIT_FLUSH) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_FLUSH_L3;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /* Wa_1409600907:tgl */
+ flags |= PIPE_CONTROL_DEPTH_STALL;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen12_emit_pipe_control(cs,
+ PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+ flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(rq, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ cs = intel_ring_begin(rq, 8 + 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Prevent the pre-parser from skipping past the TLB
+ * invalidate and loading a stale page for the batch
+ * buffer / request payload.
+ */
+ *cs++ = preparser_disable(true);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+ /* hsdes: 1809175790 */
+ cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs);
+
+ *cs++ = preparser_disable(false);
+ intel_ring_advance(rq, cs);
+ }
+
+ return 0;
+}
+
+int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
+{
+ intel_engine_mask_t aux_inv = 0;
+ u32 cmd, *cs;
+
+ cmd = 4;
+ if (mode & EMIT_INVALIDATE)
+ cmd += 2;
+ if (mode & EMIT_INVALIDATE)
+ aux_inv = rq->engine->mask & ~BIT(BCS0);
+ if (aux_inv)
+ cmd += 2 * hweight8(aux_inv) + 2;
+
+ cs = intel_ring_begin(rq, cmd);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ if (mode & EMIT_INVALIDATE)
+ *cs++ = preparser_disable(true);
+
+ cmd = MI_FLUSH_DW + 1;
+
+ /*
+ * We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ if (mode & EMIT_INVALIDATE) {
+ cmd |= MI_INVALIDATE_TLB;
+ if (rq->engine->class == VIDEO_DECODE_CLASS)
+ cmd |= MI_INVALIDATE_BSD;
+ }
+
+ *cs++ = cmd;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
+
+ if (aux_inv) { /* hsdes: 1809175790 */
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv));
+ for_each_engine_masked(engine, rq->engine->gt,
+ aux_inv, tmp) {
+ *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
+ *cs++ = AUX_INV;
+ }
+ *cs++ = MI_NOOP;
+ }
+
+ if (mode & EMIT_INVALIDATE)
+ *cs++ = preparser_disable(false);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static u32 preempt_address(struct intel_engine_cs *engine)
+{
+ return (i915_ggtt_offset(engine->status_page.vma) +
+ I915_GEM_HWS_PREEMPT_ADDR);
+}
+
+static u32 hwsp_offset(const struct i915_request *rq)
+{
+ const struct intel_timeline_cacheline *cl;
+
+ /* Before the request is executed, the timeline/cacheline is fixed */
+
+ cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
+ if (cl)
+ return cl->ggtt_offset;
+
+ return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
+}
+
+int gen8_emit_init_breadcrumb(struct i915_request *rq)
+{
+ u32 *cs;
+
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
+ if (!i915_request_timeline(rq)->has_initial_breadcrumb)
+ return 0;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = hwsp_offset(rq);
+ *cs++ = 0;
+ *cs++ = rq->fence.seqno - 1;
+
+ /*
+ * Check if we have been preempted before we even get started.
+ *
+ * After this point i915_request_started() reports true, even if
+ * we get preempted and so are no longer running.
+ *
+ * i915_request_started() is used during preemption processing
+ * to decide if the request is currently inside the user payload
+ * or spinning on a kernel semaphore (or earlier). For no-preemption
+ * requests, we do allow preemption on the semaphore before the user
+ * payload, but do not allow preemption once the request is started.
+ *
+ * i915_request_started() is similarly used during GPU hangs to
+ * determine if the user's payload was guilty, and if so, the
+ * request is banned. Before the request is started, it is assumed
+ * to be unharmed and an innocent victim of another's hang.
+ */
+ *cs++ = MI_NOOP;
+ *cs++ = MI_ARB_CHECK;
+
+ intel_ring_advance(rq, cs);
+
+ /* Record the updated position of the request's payload */
+ rq->infix = intel_ring_offset(rq, cs);
+
+ __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
+
+ return 0;
+}
+
+int gen8_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * WaDisableCtxRestoreArbitration:bdw,chv
+ *
+ * We don't need to perform MI_ARB_ENABLE as often as we do (in
+ * particular all the gen that do not need the w/a at all!), if we
+ * took care to make sure that on every switch into this context
+ * (both ordinary and for preemption) that arbitration was enabled
+ * we would be fine. However, for gen8 there is another w/a that
+ * requires us to not preempt inside GPGPU execution, so we keep
+ * arbitration disabled for gen8 batches. Arbitration will be
+ * re-enabled before we close the request
+ * (engine->emit_fini_breadcrumb).
+ */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ /* FIXME(BDW+): Address space and security selectors. */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen8_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ u32 *cs;
+
+ if (unlikely(i915_request_has_nopreempt(rq)))
+ return gen8_emit_bb_start_noarb(rq, offset, len, flags);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static void assert_request_valid(struct i915_request *rq)
+{
+ struct intel_ring *ring __maybe_unused = rq->ring;
+
+ /* Can we unwind this request without appearing to go forwards? */
+ GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
+}
+
+/*
+ * Reserve space for 2 NOOPs at the end of each request to be
+ * used as a workaround for not being allowed to do lite
+ * restore with HEAD==TAIL (WaIdleLiteRestore).
+ */
+static u32 *gen8_emit_wa_tail(struct i915_request *rq, u32 *cs)
+{
+ /* Ensure there's always at least one preemption point per-request. */
+ *cs++ = MI_ARB_CHECK;
+ *cs++ = MI_NOOP;
+ rq->wa_tail = intel_ring_offset(rq, cs);
+
+ /* Check that entire request is less than half the ring */
+ assert_request_valid(rq);
+
+ return cs;
+}
+
+static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = preempt_address(rq->engine);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ return cs;
+}
+
+static __always_inline u32*
+gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_USER_INTERRUPT;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(rq->engine))
+ cs = emit_preempt_busywait(rq, cs);
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return gen8_emit_wa_tail(rq, cs);
+}
+
+static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
+}
+
+u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
+}
+
+u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ cs = gen8_emit_pipe_control(cs,
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE,
+ 0);
+
+ /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ rq->fence.seqno,
+ hwsp_offset(rq),
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL);
+
+ return gen8_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ rq->fence.seqno,
+ hwsp_offset(rq),
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+
+ return gen8_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+/*
+ * Note that the CS instruction pre-parser will not stall on the breadcrumb
+ * flush and will continue pre-fetching the instructions after it before the
+ * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
+ * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
+ * of the next request before the memory has been flushed, we're guaranteed that
+ * we won't access the batch itself too early.
+ * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
+ * so, if the current request is modifying an instruction in the next request on
+ * the same intel_context, we might pre-fetch and then execute the pre-update
+ * instruction. To avoid this, the users of self-modifying code should either
+ * disable the parser around the code emitting the memory writes, via a new flag
+ * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
+ * the in-kernel use-cases we've opted to use a separate context, see
+ * reloc_gpu() as an example.
+ * All the above applies only to the instructions themselves. Non-inline data
+ * used by the instructions is not pre-fetched.
+ */
+
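/*
 * Illustrative sketch, not part of the patch: wrapping self-modifying
 * writes with the pre-parser toggle described above, following the same
 * pattern gen12_emit_flush_xcs() uses around its invalidation:
 *
 *	*cs++ = preparser_disable(true);
 *	... emit the writes that patch instructions in the next request ...
 *	*cs++ = preparser_disable(false);
 */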
+static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
+ *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = preempt_address(rq->engine);
+ *cs++ = 0;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static __always_inline u32*
+gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_USER_INTERRUPT;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(rq->engine))
+ cs = gen12_emit_preempt_busywait(rq, cs);
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return gen8_emit_wa_tail(rq, cs);
+}
+
+u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ /* XXX Stalling flush before seqno write; post-sync not */
+ cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
+ return gen12_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ cs = gen12_emit_ggtt_write_rcs(cs,
+ rq->fence.seqno,
+ hwsp_offset(rq),
+ PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ /* Wa_1409600907:tgl */
+ PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+
+ return gen12_emit_fini_breadcrumb_tail(rq, cs);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
new file mode 100644
index 000000000000..cc6e21d3662a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#ifndef __GEN8_ENGINE_CS_H__
+#define __GEN8_ENGINE_CS_H__
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+
+#include "intel_gpu_commands.h"
+
+struct i915_request;
+
+int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode);
+
+int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode);
+int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode);
+
+int gen8_emit_init_breadcrumb(struct i915_request *rq);
+
+int gen8_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+int gen8_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+
+u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+
+u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+
+static inline u32 *
+__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+{
+ memset(batch, 0, 6 * sizeof(u32));
+
+ batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
+ batch[1] = flags1;
+ batch[2] = offset;
+
+ return batch + 6;
+}
+
+static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+{
+ return __gen8_emit_pipe_control(batch, 0, flags, offset);
+}
+
+static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+{
+ return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
+}
+
+static inline u32 *
+__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
+{
+ *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
+ *cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = value;
+ *cs++ = 0; /* We're thrashing one extra dword. */
+
+ return cs;
+}
+
+static inline u32*
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_write_rcs(cs,
+ value,
+ gtt_offset,
+ 0,
+ flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
+static inline u32*
+gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
+{
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_write_rcs(cs,
+ value,
+ gtt_offset,
+ flags0,
+ flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
+static inline u32 *
+__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ *cs++ = (MI_FLUSH_DW + 1) | flags;
+ *cs++ = gtt_offset;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+}
+
+static inline u32 *
+gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+ GEM_BUG_ON(gtt_offset & (1 << 5));
+ /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_flush_dw(cs,
+ value,
+ gtt_offset | MI_FLUSH_DW_USE_GTT,
+ flags | MI_FLUSH_DW_OP_STOREDW);
+}
+
+#endif /* __GEN8_ENGINE_CS_H__ */
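/*
 * Illustrative sketch, not part of the patch: composing a simple
 * render-target flush with the relocated helpers above. Assumes cs
 * points at ring space already reserved via intel_ring_begin(rq, 6) and
 * that scratch_addr is a valid post-sync write address.
 */
static u32 *example_emit_rt_flush(u32 *cs, u32 scratch_addr)
{
	return gen8_emit_pipe_control(cs,
				      PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
				      PIPE_CONTROL_CS_STALL,
				      scratch_addr);
}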
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index a37c968ef8f7..755522ced60d 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -109,7 +109,7 @@ static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
-static inline unsigned int
+static unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
const int shift = gen8_pd_shift(lvl);
@@ -125,7 +125,7 @@ gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
return i915_pde_index(end, shift) - *idx;
}
-static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
+static bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
@@ -133,7 +133,7 @@ static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
return (start ^ end) & mask && (start & ~mask) == 0;
}
-static inline unsigned int gen8_pt_count(u64 start, u64 end)
+static unsigned int gen8_pt_count(u64 start, u64 end)
{
GEM_BUG_ON(start >= end);
if ((start ^ end) >> gen8_pd_shift(1))
@@ -142,14 +142,13 @@ static inline unsigned int gen8_pt_count(u64 start, u64 end)
return end - start;
}
-static inline unsigned int
-gen8_pd_top_count(const struct i915_address_space *vm)
+static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
{
unsigned int shift = __gen8_pte_shift(vm->top);
return (vm->total + (1ull << shift) - 1) >> shift;
}
-static inline struct i915_page_directory *
+static struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
@@ -160,7 +159,7 @@ gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}
-static inline struct i915_page_directory *
+static struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index a24cc1ff08a0..34a645d6babd 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -134,11 +134,6 @@ static bool remove_signaling_context(struct intel_breadcrumbs *b,
return true;
}
-static inline bool __request_completed(const struct i915_request *rq)
-{
- return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
-}
-
__maybe_unused static bool
check_signal_order(struct intel_context *ce, struct i915_request *rq)
{
@@ -192,18 +187,6 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
intel_engine_add_retire(b->irq_engine, tl);
}
-static bool __signal_request(struct i915_request *rq)
-{
- GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
-
- if (!__dma_fence_signal(&rq->fence)) {
- i915_request_put(rq);
- return false;
- }
-
- return true;
-}
-
static struct llist_node *
slist_add(struct llist_node *node, struct llist_node *head)
{
@@ -251,13 +234,14 @@ static void signal_irq_work(struct irq_work *work)
intel_breadcrumbs_disarm_irq(b);
rcu_read_lock();
+ atomic_inc(&b->signaler_active);
list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
struct i915_request *rq;
list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
bool release;
- if (!__request_completed(rq))
+ if (!__i915_request_is_complete(rq))
break;
if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
@@ -273,17 +257,20 @@ static void signal_irq_work(struct irq_work *work)
list_del_rcu(&rq->signal_link);
release = remove_signaling_context(b, ce);
spin_unlock(&ce->signal_lock);
-
- if (__signal_request(rq))
- /* We own signal_node now, xfer to local list */
- signal = slist_add(&rq->signal_node, signal);
-
if (release) {
- add_retire(b, ce->timeline);
+ if (intel_timeline_is_last(ce->timeline, rq))
+ add_retire(b, ce->timeline);
intel_context_put(ce);
}
+
+ if (__dma_fence_signal(&rq->fence))
+ /* We own signal_node now, xfer to local list */
+ signal = slist_add(&rq->signal_node, signal);
+ else
+ i915_request_put(rq);
}
}
+ atomic_dec(&b->signaler_active);
rcu_read_unlock();
llist_for_each_safe(signal, sn, signal) {
@@ -342,17 +329,19 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
spin_unlock_irqrestore(&b->irq_lock, flags);
}
-void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
+void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
{
- /* Kick the work once more to drain the signalers */
+ if (!READ_ONCE(b->irq_armed))
+ return;
+
+ /* Kick the work once more to drain the signalers, and disarm the irq */
irq_work_sync(&b->irq_work);
- while (unlikely(READ_ONCE(b->irq_armed))) {
+ while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
local_irq_disable();
signal_irq_work(&b->irq_work);
local_irq_enable();
cond_resched();
}
- GEM_BUG_ON(!list_empty(&b->signalers));
}
void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
@@ -363,6 +352,17 @@ void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
kfree(b);
}
+static void irq_signal_request(struct i915_request *rq,
+ struct intel_breadcrumbs *b)
+{
+ if (!__dma_fence_signal(&rq->fence))
+ return;
+
+ i915_request_get(rq);
+ if (llist_add(&rq->signal_node, &b->signaled_requests))
+ irq_work_queue(&b->irq_work);
+}
+
static void insert_breadcrumb(struct i915_request *rq)
{
struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
@@ -372,17 +372,13 @@ static void insert_breadcrumb(struct i915_request *rq)
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
return;
- i915_request_get(rq);
-
/*
* If the request is already completed, we can transfer it
* straight onto a signaled list, and queue the irq worker for
* its signal completion.
*/
- if (__request_completed(rq)) {
- if (__signal_request(rq) &&
- llist_add(&rq->signal_node, &b->signaled_requests))
- irq_work_queue(&b->irq_work);
+ if (__i915_request_is_complete(rq)) {
+ irq_signal_request(rq, b);
return;
}
@@ -413,6 +409,8 @@ static void insert_breadcrumb(struct i915_request *rq)
break;
}
}
+
+ i915_request_get(rq);
list_add_rcu(&rq->signal_link, pos);
GEM_BUG_ON(!check_signal_order(ce, rq));
GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
@@ -453,22 +451,61 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
+ struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
struct intel_context *ce = rq->context;
bool release;
- if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+ spin_lock(&ce->signal_lock);
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
+ spin_unlock(&ce->signal_lock);
return;
+ }
- spin_lock(&ce->signal_lock);
list_del_rcu(&rq->signal_link);
- release = remove_signaling_context(rq->engine->breadcrumbs, ce);
+ release = remove_signaling_context(b, ce);
spin_unlock(&ce->signal_lock);
if (release)
intel_context_put(ce);
+ if (__i915_request_is_complete(rq))
+ irq_signal_request(rq, b);
+
i915_request_put(rq);
}
+void intel_context_remove_breadcrumbs(struct intel_context *ce,
+ struct intel_breadcrumbs *b)
+{
+ struct i915_request *rq, *rn;
+ bool release = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ce->signal_lock, flags);
+
+ if (list_empty(&ce->signals))
+ goto unlock;
+
+ list_for_each_entry_safe(rq, rn, &ce->signals, signal_link) {
+ GEM_BUG_ON(!__i915_request_is_complete(rq));
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
+ &rq->fence.flags))
+ continue;
+
+ list_del_rcu(&rq->signal_link);
+ irq_signal_request(rq, b);
+ i915_request_put(rq);
+ }
+ release = remove_signaling_context(b, ce);
+
+unlock:
+ spin_unlock_irqrestore(&ce->signal_lock, flags);
+ if (release)
+ intel_context_put(ce);
+
+ while (atomic_read(&b->signaler_active))
+ cpu_relax();
+}
+
static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
{
struct intel_context *ce;
@@ -481,8 +518,8 @@ static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
list_for_each_entry_rcu(rq, &ce->signals, signal_link)
drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
rq->fence.context, rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
+ __i915_request_is_complete(rq) ? "!" :
+ __i915_request_has_started(rq) ? "*" :
"",
jiffies_to_msecs(jiffies - rq->emitted_jiffies));
}
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
index ed3d1deabfbd..3ce5ce270b04 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_BREADCRUMBS__
#define __INTEL_BREADCRUMBS__
+#include <linux/atomic.h>
#include <linux/irq_work.h>
#include "intel_engine_types.h"
@@ -19,7 +20,18 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine);
void intel_breadcrumbs_free(struct intel_breadcrumbs *b);
void intel_breadcrumbs_reset(struct intel_breadcrumbs *b);
-void intel_breadcrumbs_park(struct intel_breadcrumbs *b);
+void __intel_breadcrumbs_park(struct intel_breadcrumbs *b);
+
+static inline void intel_breadcrumbs_unpark(struct intel_breadcrumbs *b)
+{
+ atomic_inc(&b->active);
+}
+
+static inline void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
+{
+ if (atomic_dec_and_test(&b->active))
+ __intel_breadcrumbs_park(b);
+}
static inline void
intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
@@ -33,4 +45,7 @@ void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);
+void intel_context_remove_breadcrumbs(struct intel_context *ce,
+ struct intel_breadcrumbs *b);
+
#endif /* __INTEL_BREADCRUMBS__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
index a74bb3062bd8..3a084ce8ff5e 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -29,17 +29,20 @@
* the overhead of waking that client is much preferred.
*/
struct intel_breadcrumbs {
- /* Not all breadcrumbs are attached to physical HW */
- struct intel_engine_cs *irq_engine;
+ atomic_t active;
spinlock_t signalers_lock; /* protects the list of signalers */
struct list_head signalers;
struct llist_head signaled_requests;
+ atomic_t signaler_active;
spinlock_t irq_lock; /* protects the interrupt from hardirq context */
struct irq_work irq_work; /* for use from inside irq_lock */
unsigned int irq_enabled;
bool irq_armed;
+
+ /* Not all breadcrumbs are attached to physical HW */
+ struct intel_engine_cs *irq_engine;
};
#endif /* __INTEL_BREADCRUMBS_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index fda2eba81e22..d24ab6fa0ee5 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -191,6 +191,11 @@ static inline bool intel_context_is_closed(const struct intel_context *ce)
return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}
+static inline bool intel_context_has_inflight(const struct intel_context *ce)
+{
+ return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
+}
+
static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
@@ -248,16 +253,14 @@ intel_context_clear_nopreempt(struct intel_context *ce)
static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
{
- const u32 period =
- RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+ const u32 period = ce->engine->gt->clock_period_ns;
return READ_ONCE(ce->runtime.total) * period;
}
static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
{
- const u32 period =
- RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+ const u32 period = ce->engine->gt->clock_period_ns;
return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index b9c8163978a3..8dfd8f656aaa 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -9,7 +9,6 @@
#include "intel_engine_pm.h"
#include "intel_gpu_commands.h"
#include "intel_lrc.h"
-#include "intel_lrc_reg.h"
#include "intel_ring.h"
#include "intel_sseu.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 52fa9c132746..e10d78601bbd 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -30,6 +30,10 @@ struct intel_context;
struct intel_ring;
struct intel_context_ops {
+ unsigned long flags;
+#define COPS_HAS_INFLIGHT_BIT 0
+#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)
+
int (*alloc)(struct intel_context *ce);
int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
@@ -58,8 +62,12 @@ struct intel_context {
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
-#define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2)
-#define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2)
+#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
+#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
+#define intel_context_inflight(ce) \
+ __intel_context_inflight(READ_ONCE((ce)->inflight))
+#define intel_context_inflight_count(ce) \
+ __intel_context_inflight_count(READ_ONCE((ce)->inflight))
struct i915_address_space *vm;
struct i915_gem_context __rcu *gem_context;
@@ -81,12 +89,13 @@ struct intel_context {
unsigned long flags;
#define CONTEXT_BARRIER_BIT 0
#define CONTEXT_ALLOC_BIT 1
-#define CONTEXT_VALID_BIT 2
-#define CONTEXT_CLOSED_BIT 3
-#define CONTEXT_USE_SEMAPHORES 4
-#define CONTEXT_BANNED 5
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 6
-#define CONTEXT_NOPREEMPT 7
+#define CONTEXT_INIT_BIT 2
+#define CONTEXT_VALID_BIT 3
+#define CONTEXT_CLOSED_BIT 4
+#define CONTEXT_USE_SEMAPHORES 5
+#define CONTEXT_BANNED 6
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 7
+#define CONTEXT_NOPREEMPT 8
u32 *lrc_reg_state;
union {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 760fefdfe392..47ee8578e511 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -15,7 +15,6 @@
#include "i915_selftest.h"
#include "gt/intel_timeline.h"
#include "intel_engine_types.h"
-#include "intel_gpu_commands.h"
#include "intel_workarounds.h"
struct drm_printer;
@@ -223,91 +222,6 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
void intel_engine_init_execlists(struct intel_engine_cs *engine);
-static inline u32 *__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
-{
- memset(batch, 0, 6 * sizeof(u32));
-
- batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
- batch[1] = flags1;
- batch[2] = offset;
-
- return batch + 6;
-}
-
-static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
-{
- return __gen8_emit_pipe_control(batch, 0, flags, offset);
-}
-
-static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
-{
- return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
-}
-
-static inline u32 *
-__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
-{
- *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
- *cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
- *cs++ = offset;
- *cs++ = 0;
- *cs++ = value;
- *cs++ = 0; /* We're thrashing one extra dword. */
-
- return cs;
-}
-
-static inline u32*
-gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
-{
- /* We're using qword write, offset should be aligned to 8 bytes. */
- GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
- return __gen8_emit_write_rcs(cs,
- value,
- gtt_offset,
- 0,
- flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
-}
-
-static inline u32*
-gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
-{
- /* We're using qword write, offset should be aligned to 8 bytes. */
- GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
- return __gen8_emit_write_rcs(cs,
- value,
- gtt_offset,
- flags0,
- flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
-}
-
-static inline u32 *
-__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
-{
- *cs++ = (MI_FLUSH_DW + 1) | flags;
- *cs++ = gtt_offset;
- *cs++ = 0;
- *cs++ = value;
-
- return cs;
-}
-
-static inline u32 *
-gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
-{
- /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
- GEM_BUG_ON(gtt_offset & (1 << 5));
- /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
- GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
- return __gen8_emit_flush_dw(cs,
- value,
- gtt_offset | MI_FLUSH_DW_USE_GTT,
- flags | MI_FLUSH_DW_OP_STOREDW);
-}
-
static inline void __intel_engine_reset(struct intel_engine_cs *engine,
bool stalled)
{
@@ -318,7 +232,12 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
bool intel_engines_are_idle(struct intel_gt *gt);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
-void intel_engine_flush_submission(struct intel_engine_cs *engine);
+
+void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync);
+static inline void intel_engine_flush_submission(struct intel_engine_cs *engine)
+{
+ __intel_engine_flush_submission(engine, true);
+}
void intel_engines_reset_default_submission(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 0b31670343f5..fb1b1d096975 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -33,12 +33,14 @@
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_user.h"
+#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_gt_pm.h"
-#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
#include "intel_reset.h"
#include "intel_ring.h"
+#include "uc/intel_guc_submission.h"
/* Haswell does have the CXT_SIZE register however it does not appear to be
* valid. Now, docs explain in dwords what is in the context object. The full
@@ -340,7 +342,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->schedule = NULL;
ewma__engine_latency_init(&engine->latency);
- seqlock_init(&engine->stats.lock);
+ seqcount_init(&engine->stats.lock);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
@@ -647,6 +649,8 @@ static int init_status_page(struct intel_engine_cs *engine)
void *vaddr;
int ret;
+ INIT_LIST_HEAD(&engine->status_page.timelines);
+
/*
* Though the HWS register does support 36bit addresses, historically
* we have had hangs and corruption reported due to wild writes if
@@ -723,6 +727,9 @@ static int engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_whitelist(engine);
intel_engine_init_ctx_wa(engine);
+ if (INTEL_GEN(engine->i915) >= 12)
+ engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
+
return 0;
err_status:
@@ -829,6 +836,21 @@ create_pinned_context(struct intel_engine_cs *engine,
return ce;
}
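+
+/*
+ * Counterpart to create_pinned_context(): remove the context's timeline
+ * from the engine's status-page list before unpinning and releasing it.
+ */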
+static void destroy_pinned_context(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct i915_vma *hwsp = engine->status_page.vma;
+
+ GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);
+
+ mutex_lock(&hwsp->vm->mutex);
+ list_del(&ce->timeline->engine_link);
+ mutex_unlock(&hwsp->vm->mutex);
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+}
+
static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
@@ -889,7 +911,9 @@ int intel_engines_init(struct intel_gt *gt)
enum intel_engine_id id;
int err;
- if (HAS_EXECLISTS(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ setup = intel_guc_submission_setup;
+ else if (HAS_EXECLISTS(gt->i915))
setup = intel_execlists_submission_setup;
else
setup = intel_ring_submission_setup;
@@ -925,7 +949,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
GEM_BUG_ON(!list_empty(&engine->active.requests));
tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
- cleanup_status_page(engine);
intel_breadcrumbs_free(engine->breadcrumbs);
intel_engine_fini_retire(engine);
@@ -934,11 +957,11 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
fput(engine->default_state);
- if (engine->kernel_context) {
- intel_context_unpin(engine->kernel_context);
- intel_context_put(engine->kernel_context);
- }
+ if (engine->kernel_context)
+ destroy_pinned_context(engine->kernel_context);
+
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
+ cleanup_status_page(engine);
intel_wa_list_free(&engine->ctx_wa_list);
intel_wa_list_free(&engine->wa_list);
@@ -1002,32 +1025,50 @@ static unsigned long stop_timeout(const struct intel_engine_cs *engine)
return READ_ONCE(engine->props.stop_timeout_ms);
}
-int intel_engine_stop_cs(struct intel_engine_cs *engine)
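+/*
+ * Ask the command streamer to stop (STOP_RING) and poll for MODE_IDLE,
+ * first busy-waiting for fast_timeout_us and then sleeping for up to
+ * slow_timeout_ms before giving up.
+ */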
+static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
+ int fast_timeout_us,
+ int slow_timeout_ms)
{
struct intel_uncore *uncore = engine->uncore;
- const u32 base = engine->mmio_base;
- const i915_reg_t mode = RING_MI_MODE(base);
+ const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
int err;
+ intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
+ err = __intel_wait_for_register_fw(engine->uncore, mode,
+ MODE_IDLE, MODE_IDLE,
+ fast_timeout_us,
+ slow_timeout_ms,
+ NULL);
+
+ /* A final mmio read to let GPU writes be hopefully flushed to memory */
+ intel_uncore_posting_read_fw(uncore, mode);
+ return err;
+}
+
+int intel_engine_stop_cs(struct intel_engine_cs *engine)
+{
+ int err = 0;
+
if (INTEL_GEN(engine->i915) < 3)
return -ENODEV;
ENGINE_TRACE(engine, "\n");
+ if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
+ ENGINE_TRACE(engine,
+ "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
+ ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
+ ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);
- intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
-
- err = 0;
- if (__intel_wait_for_register_fw(uncore,
- mode, MODE_IDLE, MODE_IDLE,
- 1000, stop_timeout(engine),
- NULL)) {
- ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
- err = -ETIMEDOUT;
+ /*
+ * Sometimes we observe that the idle flag is not
+ * set even though the ring is empty. So double
+ * check before giving up.
+ */
+ if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
+ (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
+ err = -ETIMEDOUT;
}
- /* A final mmio read to let GPU writes be hopefully flushed to memory */
- intel_uncore_posting_read_fw(uncore, mode);
-
return err;
}
@@ -1189,17 +1230,13 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return idle;
}
-void intel_engine_flush_submission(struct intel_engine_cs *engine)
+void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
{
struct tasklet_struct *t = &engine->execlists.tasklet;
if (!t->func)
return;
- /* Synchronise and wait for the tasklet on another CPU */
- tasklet_kill(t);
-
- /* Having cancelled the tasklet, ensure that is run */
local_bh_disable();
if (tasklet_trylock(t)) {
/* Must wait for any GPU reset in progress. */
@@ -1208,6 +1245,10 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine)
tasklet_unlock(t);
}
local_bh_enable();
+
+ /* Synchronise and wait for the tasklet on another CPU */
+ if (sync)
+ tasklet_unlock_wait(t);
}
/**
@@ -1273,8 +1314,12 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt, id)
+ for_each_engine(engine, gt, id) {
+ if (engine->sanitize)
+ engine->sanitize(engine);
+
engine->set_default_submission(engine);
+ }
}
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
@@ -1294,44 +1339,6 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
}
}
-static int print_sched_attr(const struct i915_sched_attr *attr,
- char *buf, int x, int len)
-{
- if (attr->priority == I915_PRIORITY_INVALID)
- return x;
-
- x += snprintf(buf + x, len - x,
- " prio=%d", attr->priority);
-
- return x;
-}
-
-static void print_request(struct drm_printer *m,
- struct i915_request *rq,
- const char *prefix)
-{
- const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
- char buf[80] = "";
- int x = 0;
-
- x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
-
- drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
- prefix,
- rq->fence.context, rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &rq->fence.flags) ? "+" :
- test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &rq->fence.flags) ? "-" :
- "",
- buf,
- jiffies_to_msecs(jiffies - rq->emitted_jiffies),
- name);
-}
-
static struct intel_timeline *get_timeline(struct i915_request *rq)
{
struct intel_timeline *tl;
@@ -1480,7 +1487,9 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
- if (HAS_EXECLISTS(dev_priv)) {
+ if (intel_engine_in_guc_submission_mode(engine)) {
+ /* nothing to print yet */
+ } else if (HAS_EXECLISTS(dev_priv)) {
struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -1529,7 +1538,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
- print_request(m, rq, hdr);
+ i915_request_show(m, rq, hdr, 0);
}
for (port = execlists->pending; (rq = *port); port++) {
char hdr[160];
@@ -1543,7 +1552,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
- print_request(m, rq, hdr);
+ i915_request_show(m, rq, hdr, 0);
}
rcu_read_unlock();
execlists_active_unlock_bh(execlists);
@@ -1667,7 +1676,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
ktime_to_ms(intel_engine_get_busy_time(engine,
&dummy)));
drm_printf(m, "\tForcewake: %x domains, %d active\n",
- engine->fw_domain, atomic_read(&engine->fw_active));
+ engine->fw_domain, READ_ONCE(engine->fw_active));
rcu_read_lock();
rq = READ_ONCE(engine->heartbeat.systole);
@@ -1687,7 +1696,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (rq) {
struct intel_timeline *tl = get_timeline(rq);
- print_request(m, rq, "\t\tactive ");
+ i915_request_show(m, rq, "\t\tactive ", 0);
drm_printf(m, "\t\tring->start: 0x%08x\n",
i915_ggtt_offset(rq->ring->vma));
@@ -1725,7 +1734,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
- intel_execlists_show_requests(engine, m, print_request, 8);
+ intel_execlists_show_requests(engine, m, i915_request_show, 8);
drm_printf(m, "HWSP:\n");
hexdump(m, engine->status_page.addr, PAGE_SIZE);
@@ -1745,7 +1754,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
* add it to the total.
*/
*now = ktime_get();
- if (atomic_read(&engine->stats.active))
+ if (READ_ONCE(engine->stats.active))
total = ktime_add(total, ktime_sub(*now, engine->stats.start));
return total;
@@ -1764,9 +1773,9 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
ktime_t total;
do {
- seq = read_seqbegin(&engine->stats.lock);
+ seq = read_seqcount_begin(&engine->stats.lock);
total = __intel_engine_get_busy_time(engine, now);
- } while (read_seqretry(&engine->stats.lock, seq));
+ } while (read_seqcount_retry(&engine->stats.lock, seq));
return total;
}
@@ -1802,7 +1811,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
struct intel_timeline *tl = request->context->timeline;
list_for_each_entry_from_reverse(request, &tl->requests, link) {
- if (i915_request_completed(request))
+ if (__i915_request_is_complete(request))
break;
active = request;
@@ -1813,10 +1822,10 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
return active;
list_for_each_entry(request, &engine->active.requests, sched.link) {
- if (i915_request_completed(request))
+ if (__i915_request_is_complete(request))
continue;
- if (!i915_request_started(request))
+ if (!__i915_request_has_started(request))
continue;
/* More than one preemptible request may match! */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 9060385cd69e..d7be2b9339f9 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -37,6 +37,18 @@ static bool next_heartbeat(struct intel_engine_cs *engine)
return true;
}
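+/*
+ * Build a pulse request on the given kernel context, bracketing request
+ * construction with intel_context_enter/exit to keep the context's
+ * active tracking balanced.
+ */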
+static struct i915_request *
+heartbeat_create(struct intel_context *ce, gfp_t gfp)
+{
+ struct i915_request *rq;
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, gfp);
+ intel_context_exit(ce);
+
+ return rq;
+}
+
static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
{
engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
@@ -45,6 +57,15 @@ static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
engine->heartbeat.systole = i915_request_get(rq);
}
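+/*
+ * Record the request as the engine's heartbeat pulse (idle_pulse) and
+ * submit it to the backend at the requested priority.
+ */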
+static void heartbeat_commit(struct i915_request *rq,
+ const struct i915_sched_attr *attr)
+{
+ idle_pulse(rq->engine, rq);
+
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, attr);
+}
+
static void show_heartbeat(const struct i915_request *rq,
struct intel_engine_cs *engine)
{
@@ -139,16 +160,11 @@ static void heartbeat(struct work_struct *wrk)
goto out;
}
- intel_context_enter(ce);
- rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
- intel_context_exit(ce);
+ rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
if (IS_ERR(rq))
goto unlock;
- idle_pulse(engine, rq);
-
- __i915_request_commit(rq);
- __i915_request_queue(rq, &attr);
+ heartbeat_commit(rq, &attr);
unlock:
mutex_unlock(&ce->timeline->mutex);
@@ -187,17 +203,13 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine)
GEM_BUG_ON(!intel_engine_has_preemption(engine));
GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
- intel_context_enter(ce);
- rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
- intel_context_exit(ce);
+ rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
if (IS_ERR(rq))
return PTR_ERR(rq);
__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
- idle_pulse(engine, rq);
- __i915_request_commit(rq);
- __i915_request_queue(rq, &attr);
+ heartbeat_commit(rq, &attr);
GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
return 0;
@@ -273,8 +285,12 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
int intel_engine_flush_barriers(struct intel_engine_cs *engine)
{
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
+ };
+ struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
- int err = 0;
+ int err;
if (llist_empty(&engine->barrier_tasks))
return 0;
@@ -282,15 +298,22 @@ int intel_engine_flush_barriers(struct intel_engine_cs *engine)
if (!intel_engine_pm_get_if_awake(engine))
return 0;
- rq = i915_request_create(engine->kernel_context);
+ if (mutex_lock_interruptible(&ce->timeline->mutex)) {
+ err = -EINTR;
+ goto out_rpm;
+ }
+
+ rq = heartbeat_create(ce, GFP_KERNEL);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_rpm;
+ goto out_unlock;
}
- idle_pulse(engine, rq);
- i915_request_add(rq);
+ heartbeat_commit(rq, &attr);
+ err = 0;
+out_unlock:
+ mutex_unlock(&ce->timeline->mutex);
out_rpm:
intel_engine_pm_put(engine);
return err;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 499b09cb4acf..e67d09259dd0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -60,18 +60,26 @@ static int __engine_unpark(struct intel_wakeref *wf)
/* Scrub the context image after our loss of control */
ce->ops->reset(ce);
+
+ CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
+ ce->timeline->seqno,
+ READ_ONCE(*ce->timeline->hwsp_seqno),
+ ce->ring->emit);
+ GEM_BUG_ON(ce->timeline->seqno !=
+ READ_ONCE(*ce->timeline->hwsp_seqno));
}
if (engine->unpark)
engine->unpark(engine);
+ intel_breadcrumbs_unpark(engine->breadcrumbs);
intel_engine_unpark_heartbeat(engine);
return 0;
}
#if IS_ENABLED(CONFIG_LOCKDEP)
-static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
+static unsigned long __timeline_mark_lock(struct intel_context *ce)
{
unsigned long flags;
@@ -81,8 +89,8 @@ static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
return flags;
}
-static inline void __timeline_mark_unlock(struct intel_context *ce,
- unsigned long flags)
+static void __timeline_mark_unlock(struct intel_context *ce,
+ unsigned long flags)
{
mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
local_irq_restore(flags);
@@ -90,13 +98,13 @@ static inline void __timeline_mark_unlock(struct intel_context *ce,
#else
-static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
+static unsigned long __timeline_mark_lock(struct intel_context *ce)
{
return 0;
}
-static inline void __timeline_mark_unlock(struct intel_context *ce,
- unsigned long flags)
+static void __timeline_mark_unlock(struct intel_context *ce,
+ unsigned long flags)
{
}
@@ -136,7 +144,7 @@ __queue_and_release_pm(struct i915_request *rq,
list_add_tail(&tl->link, &timelines->active_list);
/* Hand the request over to HW and so engine_retire() */
- __i915_request_queue(rq, NULL);
+ __i915_request_queue_bh(rq);
/* Let new submissions commence (and maybe retire this timeline) */
__intel_wakeref_defer_park(&engine->wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_stats.h b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
new file mode 100644
index 000000000000..24fbdd94351a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_ENGINE_STATS_H__
+#define __INTEL_ENGINE_STATS_H__
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/seqlock.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+#include "intel_engine.h"
+
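+/*
+ * Track a context being scheduled in. Only the first (outermost) entry
+ * samples the busyness start time; the seqcount write section keeps the
+ * update coherent for the PMU reader sampling from hardirq context.
+ */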
+static inline void intel_engine_context_in(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (engine->stats.active) {
+ engine->stats.active++;
+ return;
+ }
+
+ /* The writer is serialised; but the pmu reader may be from hardirq */
+ local_irq_save(flags);
+ write_seqcount_begin(&engine->stats.lock);
+
+ engine->stats.start = ktime_get();
+ engine->stats.active++;
+
+ write_seqcount_end(&engine->stats.lock);
+ local_irq_restore(flags);
+
+ GEM_BUG_ON(!engine->stats.active);
+}
+
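+/*
+ * Track a context being scheduled out. Only when the last context leaves
+ * the engine do we fold the elapsed busy time into stats.total.
+ */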
+static inline void intel_engine_context_out(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ GEM_BUG_ON(!engine->stats.active);
+ if (engine->stats.active > 1) {
+ engine->stats.active--;
+ return;
+ }
+
+ local_irq_save(flags);
+ write_seqcount_begin(&engine->stats.lock);
+
+ engine->stats.active--;
+ engine->stats.total =
+ ktime_add(engine->stats.total,
+ ktime_sub(ktime_get(), engine->stats.start));
+
+ write_seqcount_end(&engine->stats.lock);
+ local_irq_restore(flags);
+}
+
+#endif /* __INTEL_ENGINE_STATS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index ee6312601c56..d2346b425547 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -68,6 +68,7 @@ typedef u8 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
struct intel_hw_status_page {
+ struct list_head timelines;
struct i915_vma *vma;
u32 *addr;
};
@@ -183,7 +184,8 @@ struct intel_engine_execlists {
* Reserve the upper 16b for tracking internal errors.
*/
u32 error_interrupt;
-#define ERROR_CSB BIT(31)
+#define ERROR_CSB BIT(31)
+#define ERROR_PREEMPT BIT(30)
/**
* @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
@@ -237,16 +239,6 @@ struct intel_engine_execlists {
unsigned int port_mask;
/**
- * @switch_priority_hint: Second context priority.
- *
- * We submit multiple contexts to the HW simultaneously and would
- * like to occasionally switch between them to emulate timeslicing.
- * To know when timeslicing is suitable, we track the priority of
- * the context submitted second.
- */
- int switch_priority_hint;
-
- /**
* @queue_priority_hint: Highest pending priority.
*
* When we add requests into the queue, or adjust the priority of
@@ -327,7 +319,7 @@ struct intel_engine_cs {
* as possible.
*/
enum forcewake_domains fw_domain;
- atomic_t fw_active;
+ unsigned int fw_active;
unsigned long context_tag;
@@ -524,12 +516,12 @@ struct intel_engine_cs {
/**
* @active: Number of contexts currently scheduled in.
*/
- atomic_t active;
+ unsigned int active;
/**
* @lock: Lock protecting the below fields.
*/
- seqlock_t lock;
+ seqcount_t lock;
/**
* @total: Total time this engine was busy.
@@ -559,6 +551,8 @@ struct intel_engine_cs {
unsigned long stop_timeout_ms;
unsigned long timeslice_duration_ms;
} props, defaults;
+
+ I915_SELFTEST_DECLARE(struct fault_attr reset_timeout);
};
static inline bool
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
new file mode 100644
index 000000000000..ac1be7a632d3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -0,0 +1,3896 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+/**
+ * DOC: Logical Rings, Logical Ring Contexts and Execlists
+ *
+ * Motivation:
+ * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
+ * These expanded contexts enable a number of new abilities, especially
+ * "Execlists" (also implemented in this file).
+ *
+ * One of the main differences with the legacy HW contexts is that logical
+ * ring contexts incorporate many more things to the context's state, like
+ * PDPs or ringbuffer control registers:
+ *
+ * The reason why PDPs are included in the context is straightforward: as
+ * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
+ * contained there means you don't need to do a ppgtt->switch_mm yourself,
+ * instead, the GPU will do it for you on the context switch.
+ *
+ * But what about the ringbuffer control registers (head, tail, etc.)?
+ * Shouldn't we just need one set of those per engine command streamer? This is
+ * where the name "Logical Rings" starts to make sense: by virtualizing the
+ * rings, the engine cs shifts to a new "ring buffer" with every context
+ * switch. When you want to submit a workload to the GPU you: A) choose your
+ * context, B) find its appropriate virtualized ring, C) write commands to it
+ * and then, finally, D) tell the GPU to switch to that context.
+ *
+ * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
+ * to a context is via a context execution list, ergo "Execlists".
+ *
+ * LRC implementation:
+ * Regarding the creation of contexts, we have:
+ *
+ * - One global default context.
+ * - One local default context for each opened fd.
+ * - One local extra context for each context create ioctl call.
+ *
+ * Now that ringbuffers belong per-context (and not per-engine, like before)
+ * and that contexts are uniquely tied to a given engine (and not reusable,
+ * like before) we need:
+ *
+ * - One ringbuffer per-engine inside each context.
+ * - One backing object per-engine inside each context.
+ *
+ * The global default context starts its life with these new objects fully
+ * allocated and populated. The local default context for each opened fd is
+ * more complex, because we don't know at creation time which engine is going
+ * to use them. To handle this, we have implemented a deferred creation of LR
+ * contexts:
+ *
+ * The local context starts its life as a hollow or blank holder, that only
+ * gets populated for a given engine once we receive an execbuffer. If later
+ * on we receive another execbuffer ioctl for the same context but a different
+ * engine, we allocate/populate a new ringbuffer and context backing object and
+ * so on.
+ *
+ * Finally, regarding local contexts created using the ioctl call: as they are
+ * only allowed with the render ring, we can allocate & populate them right
+ * away (no need to defer anything, at least for now).
+ *
+ * Execlists implementation:
+ * Execlists are the new method by which, on gen8+ hardware, workloads are
+ * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
+ * This method works as follows:
+ *
+ * When a request is committed, its commands (the BB start and any leading or
+ * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
+ * for the appropriate context. The tail pointer in the hardware context is not
+ * updated at this time, but instead, kept by the driver in the ringbuffer
+ * structure. A structure representing this request is added to a request queue
+ * for the appropriate engine: this structure contains a copy of the context's
+ * tail after the request was written to the ring buffer and a pointer to the
+ * context itself.
+ *
+ * If the engine's request queue was empty before the request was added, the
+ * queue is processed immediately. Otherwise the queue will be processed during
+ * a context switch interrupt. In any case, elements on the queue will get sent
+ * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
+ * globally unique 20-bit submission ID.
+ *
+ * When execution of a request completes, the GPU updates the context status
+ * buffer with a context complete event and generates a context switch interrupt.
+ * During the interrupt handling, the driver examines the events in the buffer:
+ * for each context complete event, if the announced ID matches that on the head
+ * of the request queue, then that request is retired and removed from the queue.
+ *
+ * After processing, if any requests were retired and the queue is not empty
+ * then a new execution list can be submitted. The two requests at the front of
+ * the queue are next to be submitted but since a context may not occur twice in
+ * an execution list, if subsequent requests have the same ID as the first then
+ * the two requests must be combined. This is done simply by discarding requests
+ * at the head of the queue until either only one request is left (in which case
+ * we use a NULL second context) or the first two requests have unique IDs.
+ *
+ * By always executing the first two requests in the queue the driver ensures
+ * that the GPU is kept as busy as possible. In the case where a single context
+ * completes but a second context is still executing, the request for this second
+ * context will be at the head of the queue when we remove the first one. This
+ * request will then be resubmitted along with a new request for a different context,
+ * which will cause the hardware to continue executing the second request and queue
+ * the new request (the GPU detects the condition of a context getting preempted
+ * with the same context and optimizes the context switch flow by not doing
+ * preemption, but just sampling the new tail pointer).
+ *
+ */
+#include <linux/interrupt.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "gen8_engine_cs.h"
+#include "intel_breadcrumbs.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_stats.h"
+#include "intel_execlists_submission.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
+#include "intel_mocs.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "intel_workarounds.h"
+#include "shmem_utils.h"
+
+#define RING_EXECLIST_QFULL (1 << 0x2)
+#define RING_EXECLIST1_VALID (1 << 0x3)
+#define RING_EXECLIST0_VALID (1 << 0x4)
+#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
+#define RING_EXECLIST1_ACTIVE (1 << 0x11)
+#define RING_EXECLIST0_ACTIVE (1 << 0x12)
+
+#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
+#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
+#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
+#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
+#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
+#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
+
+#define GEN8_CTX_STATUS_COMPLETED_MASK \
+ (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
+
+#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */
+#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */
+#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15)
+#define GEN12_IDLE_CTX_ID 0x7FF
+#define GEN12_CSB_CTX_VALID(csb_dw) \
+ (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
+
+/* Typical size of the average request (2 pipecontrols and a MI_BB) */
+#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
+
+struct virtual_engine {
+ struct intel_engine_cs base;
+ struct intel_context context;
+ struct rcu_work rcu;
+
+ /*
+ * We allow only a single request through the virtual engine at a time
+ * (each request in the timeline waits for the completion fence of
+ * the previous before being submitted). By restricting ourselves to
+ * only submitting a single request, each request is placed on to a
+ * physical engine to maximise load spreading (by virtue of the late greedy
+ * scheduling -- each real engine takes the next available request
+ * upon idling).
+ */
+ struct i915_request *request;
+
+ /*
+ * We keep a rbtree of available virtual engines inside each physical
+ * engine, sorted by priority. Here we preallocate the nodes we need
+ * for the virtual engine, indexed by physical_engine->id.
+ */
+ struct ve_node {
+ struct rb_node rb;
+ int prio;
+ } nodes[I915_NUM_ENGINES];
+
+ /*
+ * Keep track of bonded pairs -- restrictions upon our selection
+ * of physical engines any particular request may be submitted to.
+ * If we receive a submit-fence from a master engine, we will only
+ * use one of sibling_mask physical engines.
+ */
+ struct ve_bond {
+ const struct intel_engine_cs *master;
+ intel_engine_mask_t sibling_mask;
+ } *bonds;
+ unsigned int num_bonds;
+
+ /* And finally, which physical engines this virtual engine maps onto. */
+ unsigned int num_siblings;
+ struct intel_engine_cs *siblings[];
+};
+
+static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(!intel_engine_is_virtual(engine));
+ return container_of(engine, struct virtual_engine, base);
+}
+
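+/*
+ * Walk back from @rq along its timeline to find the oldest request that
+ * has not yet completed, optionally marking each incomplete request we
+ * visit with @error and skipping its payload.
+ */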
+static struct i915_request *
+__active_request(const struct intel_timeline * const tl,
+ struct i915_request *rq,
+ int error)
+{
+ struct i915_request *active = rq;
+
+ list_for_each_entry_from_reverse(rq, &tl->requests, link) {
+ if (__i915_request_is_complete(rq))
+ break;
+
+ if (error) {
+ i915_request_set_error_once(rq, error);
+ __i915_request_skip(rq);
+ }
+ active = rq;
+ }
+
+ return active;
+}
+
+static struct i915_request *
+active_request(const struct intel_timeline * const tl, struct i915_request *rq)
+{
+ return __active_request(tl, rq, 0);
+}
+
+static void ring_set_paused(const struct intel_engine_cs *engine, int state)
+{
+ /*
+ * We inspect HWS_PREEMPT with a semaphore inside
+ * engine->emit_fini_breadcrumb. If the dword is true,
+ * the ring is paused as the semaphore will busywait
+ * until the dword is false.
+ */
+ engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
+ if (state)
+ wmb();
+}
+
+static struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+ return rb_entry(rb, struct i915_priolist, node);
+}
+
+static int rq_prio(const struct i915_request *rq)
+{
+ return READ_ONCE(rq->sched.attr.priority);
+}
+
+static int effective_prio(const struct i915_request *rq)
+{
+ int prio = rq_prio(rq);
+
+ /*
+ * If this request is special and must not be interrupted at any
+ * cost, so be it. Note we are only checking the most recent request
+ * in the context and so may be masking an earlier vip request. It
+ * is hoped that under the conditions where nopreempt is used, this
+ * will not matter (i.e. all requests to that context will be
+ * nopreempt for as long as desired).
+ */
+ if (i915_request_has_nopreempt(rq))
+ prio = I915_PRIORITY_UNPREEMPTABLE;
+
+ return prio;
+}
+
+static int queue_prio(const struct intel_engine_execlists *execlists)
+{
+ struct i915_priolist *p;
+ struct rb_node *rb;
+
+ rb = rb_first_cached(&execlists->queue);
+ if (!rb)
+ return INT_MIN;
+
+ /*
+ * As the priolist[] are inverted, with the highest priority in [0],
+ * we have to flip the index value to become priority.
+ */
+ p = to_priolist(rb);
+ if (!I915_USER_PRIORITY_SHIFT)
+ return p->priority;
+
+ return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
+}
+
+static int virtual_prio(const struct intel_engine_execlists *el)
+{
+ struct rb_node *rb = rb_first_cached(&el->virtual);
+
+ return rb ? rb_entry(rb, struct ve_node, rb)->prio : INT_MIN;
+}
+
+static bool need_preempt(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ int last_prio;
+
+ if (!intel_engine_has_semaphores(engine))
+ return false;
+
+ /*
+ * Check if the current priority hint merits a preemption attempt.
+ *
+ * We record the highest value priority we saw during rescheduling
+ * prior to this dequeue, therefore we know that if it is strictly
+ * less than the current tail of ELSP[0], we do not need to force
+ * a preempt-to-idle cycle.
+ *
+ * However, the priority hint is a mere hint that we may need to
+ * preempt. If that hint is stale or we may be trying to preempt
+ * ourselves, ignore the request.
+ *
+ * More naturally we would write
+ * prio >= max(0, last);
+ * except that we wish to prevent triggering preemption at the same
+ * priority level: the task that is running should remain running
+ * to preserve FIFO ordering of dependencies.
+ */
+ last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
+ if (engine->execlists.queue_priority_hint <= last_prio)
+ return false;
+
+ /*
+ * Check against the first request in ELSP[1], it will, thanks to the
+ * power of PI, be the highest priority of that context.
+ */
+ if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
+ rq_prio(list_next_entry(rq, sched.link)) > last_prio)
+ return true;
+
+ /*
+ * If the inflight context did not trigger the preemption, then maybe
+ * it was the set of queued requests? Pick the highest priority in
+ * the queue (the first active priolist) and see if it deserves to be
+ * running instead of ELSP[0].
+ *
+ * The highest priority request in the queue cannot be either
+ * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
+ * context, its priority would not exceed ELSP[0] aka last_prio.
+ */
+ return max(virtual_prio(&engine->execlists),
+ queue_prio(&engine->execlists)) > last_prio;
+}
+
+__maybe_unused static bool
+assert_priority_queue(const struct i915_request *prev,
+ const struct i915_request *next)
+{
+ /*
+ * Without preemption, the prev may refer to the still active element
+ * which we refuse to let go.
+ *
+ * Even with preemption, there are times when we think it is better not
+ * to preempt and leave an ostensibly lower priority request in flight.
+ */
+ if (i915_request_is_active(prev))
+ return true;
+
+ return rq_prio(prev) >= rq_prio(next);
+}
+
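+/*
+ * Preempt-to-busy unwind: take every incomplete request off the engine,
+ * return it to its priority list for resubmission (forcing a context
+ * restore if the ring has wrapped) and hand back the oldest survivor.
+ */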
+static struct i915_request *
+__unwind_incomplete_requests(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq, *rn, *active = NULL;
+ struct list_head *pl;
+ int prio = I915_PRIORITY_INVALID;
+
+ lockdep_assert_held(&engine->active.lock);
+
+ list_for_each_entry_safe_reverse(rq, rn,
+ &engine->active.requests,
+ sched.link) {
+ if (__i915_request_is_complete(rq)) {
+ list_del_init(&rq->sched.link);
+ continue;
+ }
+
+ __i915_request_unsubmit(rq);
+
+ GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+ if (rq_prio(rq) != prio) {
+ prio = rq_prio(rq);
+ pl = i915_sched_lookup_priolist(engine, prio);
+ }
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+
+ list_move(&rq->sched.link, pl);
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+ /* Check in case we rollback so far we wrap [size/2] */
+ if (intel_ring_direction(rq->ring,
+ rq->tail,
+ rq->ring->tail + 8) > 0)
+ rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
+
+ active = rq;
+ }
+
+ return active;
+}
+
+struct i915_request *
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
+{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+
+ return __unwind_incomplete_requests(engine);
+}
+
+static void
+execlists_context_status_change(struct i915_request *rq, unsigned long status)
+{
+ /*
+ * Only used when GVT-g is enabled now. When GVT-g is disabled,
+ * the compiler should eliminate this function as dead-code.
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+ return;
+
+ atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+ status, rq);
+}
+
+static void reset_active(struct i915_request *rq,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context * const ce = rq->context;
+ u32 head;
+
+ /*
+ * The executing context has been cancelled. We want to prevent
+ * further execution along this context and propagate the error on
+ * to anything depending on its results.
+ *
+ * In __i915_request_submit(), we apply the -EIO and remove the
+ * requests' payloads for any banned requests. But first, we must
+ * rewind the context back to the start of the incomplete request so
+ * that we do not jump back into the middle of the batch.
+ *
+ * We preserve the breadcrumbs and semaphores of the incomplete
+ * requests so that inter-timeline dependencies (i.e other timelines)
+ * remain correctly ordered. And we defer to __i915_request_submit()
+ * so that all asynchronous waits are correctly handled.
+ */
+ ENGINE_TRACE(engine, "{ reset rq=%llx:%lld }\n",
+ rq->fence.context, rq->fence.seqno);
+
+ /* On resubmission of the active request, payload will be scrubbed */
+ if (__i915_request_is_complete(rq))
+ head = rq->tail;
+ else
+ head = __active_request(ce->timeline, rq, -EIO)->head;
+ head = intel_ring_wrap(ce->ring, head);
+
+ /* Scrub the context image to prevent replaying the previous batch */
+ lrc_init_regs(ce, engine, true);
+
+ /* We've switched away, so this should be a no-op, but intent matters */
+ ce->lrc.lrca = lrc_update_regs(ce, engine, head);
+}
+
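+/*
+ * First submission of a context to the HW: allocate a ccid tag, take the
+ * GT wakeref (and any extra forcewake domain) and start the busyness
+ * accounting for this engine.
+ */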
+static struct intel_engine_cs *
+__execlists_schedule_in(struct i915_request *rq)
+{
+ struct intel_engine_cs * const engine = rq->engine;
+ struct intel_context * const ce = rq->context;
+
+ intel_context_get(ce);
+
+ if (unlikely(intel_context_is_closed(ce) &&
+ !intel_engine_has_heartbeat(engine)))
+ intel_context_set_banned(ce);
+
+ if (unlikely(intel_context_is_banned(ce)))
+ reset_active(rq, engine);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ lrc_check_regs(ce, engine, "before");
+
+ if (ce->tag) {
+ /* Use a fixed tag for OA and friends */
+ GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
+ ce->lrc.ccid = ce->tag;
+ } else {
+ /* We don't need a strict matching tag, just different values */
+ unsigned int tag = __ffs(engine->context_tag);
+
+ GEM_BUG_ON(tag >= BITS_PER_LONG);
+ __clear_bit(tag, &engine->context_tag);
+ ce->lrc.ccid = (1 + tag) << (GEN11_SW_CTX_ID_SHIFT - 32);
+
+ BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
+ }
+
+ ce->lrc.ccid |= engine->execlists.ccid;
+
+ __intel_gt_pm_get(engine->gt);
+ if (engine->fw_domain && !engine->fw_active++)
+ intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+ intel_engine_context_in(engine);
+
+ CE_TRACE(ce, "schedule-in, ccid:%x\n", ce->lrc.ccid);
+
+ return engine;
+}
+
+static void execlists_schedule_in(struct i915_request *rq, int idx)
+{
+ struct intel_context * const ce = rq->context;
+ struct intel_engine_cs *old;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
+ trace_i915_request_in(rq, idx);
+
+ old = ce->inflight;
+ if (!old)
+ old = __execlists_schedule_in(rq);
+ WRITE_ONCE(ce->inflight, ptr_inc(old));
+
+ GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
+}
+
+static void
+resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
+{
+ struct intel_engine_cs *engine = rq->engine;
+
+ spin_lock_irq(&engine->active.lock);
+
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ WRITE_ONCE(rq->engine, &ve->base);
+ ve->base.submit_request(rq);
+
+ spin_unlock_irq(&engine->active.lock);
+}
+
+static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ struct intel_engine_cs *engine = rq->engine;
+
+ /*
+ * After this point, the rq may be transferred to a new sibling, so
+ * before we clear ce->inflight make sure that the context has been
+ * removed from the b->signalers and furthermore we need to make sure
+ * that the concurrent iterator in signal_irq_work is no longer
+ * following ce->signal_link.
+ */
+ if (!list_empty(&ce->signals))
+ intel_context_remove_breadcrumbs(ce, engine->breadcrumbs);
+
+ /*
+ * This engine is now too busy to run this virtual request, so
+ * see if we can find an alternative engine for it to execute on.
+ * Once a request has become bonded to this engine, we treat it the
+ * same as any other native request.
+ */
+ if (i915_request_in_priority_queue(rq) &&
+ rq->execution_mask != engine->mask)
+ resubmit_virtual_request(rq, ve);
+
+ if (READ_ONCE(ve->request))
+ tasklet_hi_schedule(&ve->base.execlists.tasklet);
+}
+
+static void __execlists_schedule_out(struct i915_request * const rq,
+ struct intel_context * const ce)
+{
+ struct intel_engine_cs * const engine = rq->engine;
+ unsigned int ccid;
+
+ /*
+ * NB process_csb() is not under the engine->active.lock and hence
+ * schedule_out can race with schedule_in meaning that we should
+ * refrain from doing non-trivial work here.
+ */
+
+ CE_TRACE(ce, "schedule-out, ccid:%x\n", ce->lrc.ccid);
+ GEM_BUG_ON(ce->inflight != engine);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ lrc_check_regs(ce, engine, "after");
+
+ /*
+ * If we have just completed this context, the engine may now be
+ * idle and we want to re-enter powersaving.
+ */
+ if (intel_timeline_is_last(ce->timeline, rq) &&
+ __i915_request_is_complete(rq))
+ intel_engine_add_retire(engine, ce->timeline);
+
+ ccid = ce->lrc.ccid;
+ ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
+ ccid &= GEN12_MAX_CONTEXT_HW_ID;
+ if (ccid < BITS_PER_LONG) {
+ GEM_BUG_ON(ccid == 0);
+ GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
+ __set_bit(ccid - 1, &engine->context_tag);
+ }
+
+ lrc_update_runtime(ce);
+ intel_engine_context_out(engine);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+ if (engine->fw_domain && !--engine->fw_active)
+ intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
+ intel_gt_pm_put_async(engine->gt);
+
+ /*
+ * If this is part of a virtual engine, its next request may
+ * have been blocked waiting for access to the active context.
+ * We have to kick all the siblings again in case we need to
+ * switch (e.g. the next request is not runnable on this
+ * engine). Hopefully, we will already have submitted the next
+ * request before the tasklet runs and do not need to rebuild
+ * each virtual tree and kick everyone again.
+ */
+ if (ce->engine != engine)
+ kick_siblings(rq, ce);
+
+ WRITE_ONCE(ce->inflight, NULL);
+ intel_context_put(ce);
+}
+
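+/*
+ * Drop one inflight reference for the request's context; when the last
+ * ELSP reference is gone, __execlists_schedule_out() releases the ccid,
+ * busyness accounting and GT wakeref.
+ */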
+static inline void execlists_schedule_out(struct i915_request *rq)
+{
+ struct intel_context * const ce = rq->context;
+
+ trace_i915_request_out(rq);
+
+ GEM_BUG_ON(!ce->inflight);
+ ce->inflight = ptr_dec(ce->inflight);
+ if (!__intel_context_inflight_count(ce->inflight))
+ __execlists_schedule_out(rq, ce);
+
+ i915_request_put(rq);
+}
+
+static u64 execlists_update_context(struct i915_request *rq)
+{
+ struct intel_context *ce = rq->context;
+ u64 desc = ce->lrc.desc;
+ u32 tail, prev;
+
+ /*
+ * WaIdleLiteRestore:bdw,skl
+ *
+ * We should never submit the context with the same RING_TAIL twice
+ * just in case we submit an empty ring, which confuses the HW.
+ *
+ * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
+ * the normal request to be able to always advance the RING_TAIL on
+ * subsequent resubmissions (for lite restore). Should that fail us,
+ * and we try and submit the same tail again, force the context
+ * reload.
+ *
+ * If we need to return to a preempted context, we need to skip the
+ * lite-restore and force it to reload the RING_TAIL. Otherwise, the
+ * HW has a tendency to ignore us rewinding the TAIL to the end of
+ * an earlier request.
+ */
+ GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
+ prev = rq->ring->tail;
+ tail = intel_ring_set_tail(rq->ring, rq->tail);
+ if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
+ desc |= CTX_DESC_FORCE_RESTORE;
+ ce->lrc_reg_state[CTX_RING_TAIL] = tail;
+ rq->tail = rq->wa_tail;
+
+ /*
+ * Make sure the context image is complete before we submit it to HW.
+ *
+ * Ostensibly, writes (including the WCB) should be flushed prior to
+ * an uncached write such as our mmio register access; however, the empirical
+ * evidence (esp. on Braswell) suggests that the WC write into memory
+ * may not be visible to the HW prior to the completion of the UC
+ * register write and that we may begin execution from the context
+ * before its image is complete leading to invalid PD chasing.
+ */
+ wmb();
+
+ ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
+ return desc;
+}
+
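+/*
+ * Write a context descriptor into the chosen submit port: via the ELSQ
+ * pair when a control register is present, otherwise by the legacy
+ * upper/lower dword writes to the ELSP.
+ */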
+static void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
+{
+ if (execlists->ctrl_reg) {
+ writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
+ writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
+ } else {
+ writel(upper_32_bits(desc), execlists->submit_reg);
+ writel(lower_32_bits(desc), execlists->submit_reg);
+ }
+}
+
+static __maybe_unused char *
+dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
+{
+ if (!rq)
+ return "";
+
+ snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
+ prefix,
+ rq->context->lrc.ccid,
+ rq->fence.context, rq->fence.seqno,
+ __i915_request_is_complete(rq) ? "!" :
+ __i915_request_has_started(rq) ? "*" :
+ "",
+ rq_prio(rq));
+
+ return buf;
+}
+
+static __maybe_unused noinline void
+trace_ports(const struct intel_engine_execlists *execlists,
+ const char *msg,
+ struct i915_request * const *ports)
+{
+ const struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+ char __maybe_unused p0[40], p1[40];
+
+ if (!ports[0])
+ return;
+
+ ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
+ dump_port(p0, sizeof(p0), "", ports[0]),
+ dump_port(p1, sizeof(p1), ", ", ports[1]));
+}
+
+static bool
+reset_in_progress(const struct intel_engine_execlists *execlists)
+{
+ return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+}
+
+static __maybe_unused noinline bool
+assert_pending_valid(const struct intel_engine_execlists *execlists,
+ const char *msg)
+{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+ struct i915_request * const *port, *rq;
+ struct intel_context *ce = NULL;
+ bool sentinel = false;
+ u32 ccid = -1;
+
+ trace_ports(execlists, msg, execlists->pending);
+
+ /* We may be messing around with the lists during reset, lalala */
+ if (reset_in_progress(execlists))
+ return true;
+
+ if (!execlists->pending[0]) {
+ GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
+ engine->name);
+ return false;
+ }
+
+ if (execlists->pending[execlists_num_ports(execlists)]) {
+ GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
+ engine->name, execlists_num_ports(execlists));
+ return false;
+ }
+
+ for (port = execlists->pending; (rq = *port); port++) {
+ unsigned long flags;
+ bool ok = true;
+
+ GEM_BUG_ON(!kref_read(&rq->fence.refcount));
+ GEM_BUG_ON(!i915_request_is_active(rq));
+
+ if (ce == rq->context) {
+ GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ ce = rq->context;
+
+ if (ccid == ce->lrc.ccid) {
+ GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
+ engine->name,
+ ccid, ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ ccid = ce->lrc.ccid;
+
+ /*
+ * Sentinels are supposed to be the last request so they flush
+ * the current execution off the HW. Check that they are the only
+ * request in the pending submission.
+ */
+ if (sentinel) {
+ GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ sentinel = i915_request_has_sentinel(rq);
+
+ /*
+ * We want virtual requests to only be in the first slot so
+ * that they are never stuck behind a hog and can be immediately
+ * transferred onto the next idle engine.
+ */
+ if (rq->execution_mask != engine->mask &&
+ port != execlists->pending) {
+ GEM_TRACE_ERR("%s: virtual engine:%llx not in prime position[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+
+ /* Hold tightly onto the lock to prevent concurrent retires! */
+ if (!spin_trylock_irqsave(&rq->lock, flags))
+ continue;
+
+ if (__i915_request_is_complete(rq))
+ goto unlock;
+
+ if (i915_active_is_idle(&ce->active) &&
+ !intel_context_is_barrier(ce)) {
+ GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+ if (!i915_vma_is_pinned(ce->state)) {
+ GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+ if (!i915_vma_is_pinned(ce->ring->vma)) {
+ GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&rq->lock, flags);
+ if (!ok)
+ return false;
+ }
+
+ return ce;
+}
+
+static void execlists_submit_ports(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+ unsigned int n;
+
+ GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
+
+ /*
+ * We can skip acquiring intel_runtime_pm_get() here as it was taken
+ * on our behalf by the request (see i915_gem_mark_busy()) and it will
+ * not be relinquished until the device is idle (see
+ * i915_gem_idle_work_handler()). As a precaution, we make sure
+ * that all ELSP are drained i.e. we have processed the CSB,
+ * before allowing ourselves to idle and calling intel_runtime_pm_put().
+ */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ /*
+ * ELSQ note: the submit queue is not cleared after being submitted
+ * to the HW so we need to make sure we always clean it up. This is
+ * currently ensured by the fact that we always write the same number
+ * of elsq entries, keep this in mind before changing the loop below.
+ */
+ for (n = execlists_num_ports(execlists); n--; ) {
+ struct i915_request *rq = execlists->pending[n];
+
+ write_desc(execlists,
+ rq ? execlists_update_context(rq) : 0,
+ n);
+ }
+
+ /* we need to manually load the submit queue */
+ if (execlists->ctrl_reg)
+ writel(EL_CTRL_LOAD, execlists->ctrl_reg);
+}
+
+static bool ctx_single_port_submission(const struct intel_context *ce)
+{
+ return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
+ intel_context_force_single_submission(ce));
+}
+
+static bool can_merge_ctx(const struct intel_context *prev,
+ const struct intel_context *next)
+{
+ if (prev != next)
+ return false;
+
+ if (ctx_single_port_submission(prev))
+ return false;
+
+ return true;
+}
+
+static unsigned long i915_request_flags(const struct i915_request *rq)
+{
+ return READ_ONCE(rq->fence.flags);
+}
+
+static bool can_merge_rq(const struct i915_request *prev,
+ const struct i915_request *next)
+{
+ GEM_BUG_ON(prev == next);
+ GEM_BUG_ON(!assert_priority_queue(prev, next));
+
+ /*
+ * We do not submit known completed requests. Therefore if the next
+ * request is already completed, we can pretend to merge it in
+ * with the previous context (and we will skip updating the ELSP
+ * and tracking). Thus hopefully keeping the ELSP full with active
+ * contexts, despite the best efforts of preempt-to-busy to confuse
+ * us.
+ */
+ if (__i915_request_is_complete(next))
+ return true;
+
+ if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
+ (BIT(I915_FENCE_FLAG_NOPREEMPT) |
+ BIT(I915_FENCE_FLAG_SENTINEL))))
+ return false;
+
+ if (!can_merge_ctx(prev->context, next->context))
+ return false;
+
+ GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
+ return true;
+}
+
+static bool virtual_matches(const struct virtual_engine *ve,
+ const struct i915_request *rq,
+ const struct intel_engine_cs *engine)
+{
+ const struct intel_engine_cs *inflight;
+
+ if (!rq)
+ return false;
+
+ if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
+ return false;
+
+ /*
+ * We track when the HW has completed saving the context image
+ * (i.e. when we have seen the final CS event switching out of
+ * the context) and must not overwrite the context image before
+ * then. This restricts us to only using the active engine
+ * while the previous virtualized request is inflight (so
+ * we reuse the register offsets). This is a very small
+ * hysteresis on the greedy selection algorithm.
+ */
+ inflight = intel_context_inflight(&ve->context);
+ if (inflight && inflight != engine)
+ return false;
+
+ return true;
+}
+
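+/*
+ * Return the highest priority virtual engine with a request runnable on
+ * this physical engine, pruning any stale rbtree nodes encountered on
+ * the way.
+ */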
+static struct virtual_engine *
+first_virtual_engine(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *el = &engine->execlists;
+ struct rb_node *rb = rb_first_cached(&el->virtual);
+
+ while (rb) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq = READ_ONCE(ve->request);
+
+ /* lazily cleanup after another engine handled rq */
+ if (!rq || !virtual_matches(ve, rq, engine)) {
+ rb_erase_cached(rb, &el->virtual);
+ RB_CLEAR_NODE(rb);
+ rb = rb_first_cached(&el->virtual);
+ continue;
+ }
+
+ return ve;
+ }
+
+ return NULL;
+}
+
+static void virtual_xfer_context(struct virtual_engine *ve,
+ struct intel_engine_cs *engine)
+{
+ unsigned int n;
+
+ if (likely(engine == ve->siblings[0]))
+ return;
+
+ GEM_BUG_ON(READ_ONCE(ve->context.inflight));
+ if (!intel_engine_has_relative_mmio(engine))
+ lrc_update_offsets(&ve->context, engine);
+
+ /*
+ * Move the bound engine to the top of the list for
+ * future execution. We then kick this tasklet first
+ * before checking others, so that we preferentially
+ * reuse this set of bound registers.
+ */
+ for (n = 1; n < ve->num_siblings; n++) {
+ if (ve->siblings[n] == engine) {
+ swap(ve->siblings[n], ve->siblings[0]);
+ break;
+ }
+ }
+}
+
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
+{
+ LIST_HEAD(list);
+
+ /*
+ * We want to move the interrupted request to the back of
+ * the round-robin list (i.e. its priority level), but
+ * in doing so, we must then move all requests that were in
+ * flight and were waiting for the interrupted request to
+ * be run after it again.
+ */
+ do {
+ struct i915_dependency *p;
+
+ GEM_BUG_ON(i915_request_is_active(rq));
+ list_move_tail(&rq->sched.link, pl);
+
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ /* No waiter should start before its signaler */
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
+ __i915_request_has_started(w) &&
+ !__i915_request_is_complete(rq));
+
+ GEM_BUG_ON(i915_request_is_active(w));
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (rq_prio(w) < rq_prio(rq))
+ continue;
+
+ GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static void defer_active(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = __unwind_incomplete_requests(engine);
+ if (!rq)
+ return;
+
+ defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+}
+
+static bool
+timeslice_yield(const struct intel_engine_execlists *el,
+ const struct i915_request *rq)
+{
+ /*
+ * Once bitten, forever smitten!
+ *
+ * If the active context ever busy-waited on a semaphore,
+ * it will be treated as a hog until the end of its timeslice (i.e.
+ * until it is scheduled out and replaced by a new submission,
+ * possibly even its own lite-restore). The HW only sends an interrupt
+ * on the first miss, and we do not know if that semaphore has been
+ * signaled, or even if it is now stuck on another semaphore. Play
+ * safe, yield if it might be stuck -- it will be given a fresh
+ * timeslice in the near future.
+ */
+ return rq->context->lrc.ccid == READ_ONCE(el->yield);
+}
+
+static bool needs_timeslice(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ if (!intel_engine_has_timeslices(engine))
+ return false;
+
+ /* If not currently active, or about to switch, wait for next event */
+ if (!rq || __i915_request_is_complete(rq))
+ return false;
+
+ /* We do not need to start the timeslice until after the ACK */
+ if (READ_ONCE(engine->execlists.pending[0]))
+ return false;
+
+ /* If ELSP[1] is occupied, always check to see if worth slicing */
+ if (!list_is_last_rcu(&rq->sched.link, &engine->active.requests)) {
+ ENGINE_TRACE(engine, "timeslice required for second inflight context\n");
+ return true;
+ }
+
+ /* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
+ if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)) {
+ ENGINE_TRACE(engine, "timeslice required for queue\n");
+ return true;
+ }
+
+ if (!RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root)) {
+ ENGINE_TRACE(engine, "timeslice required for virtual\n");
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+ const struct intel_engine_execlists *el = &engine->execlists;
+
+ if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
+ return false;
+
+ if (!needs_timeslice(engine, rq))
+ return false;
+
+ return timer_expired(&el->timer) || timeslice_yield(el, rq);
+}
+
+static unsigned long timeslice(const struct intel_engine_cs *engine)
+{
+ return READ_ONCE(engine->props.timeslice_duration_ms);
+}
+
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *el = &engine->execlists;
+ unsigned long duration;
+
+ /* Disable the timer if there is nothing to switch to */
+ duration = 0;
+ if (needs_timeslice(engine, *el->active)) {
+ /* Avoid continually prolonging an active timeslice */
+ if (timer_active(&el->timer)) {
+ /*
+ * If we just submitted a new ELSP after an old
+ * context, that context may have already consumed
+ * its timeslice, so recheck.
+ */
+ if (!timer_pending(&el->timer))
+ tasklet_hi_schedule(&el->tasklet);
+ return;
+ }
+
+ duration = timeslice(engine);
+ }
+
+ set_timer_ms(&el->timer, duration);
+}
+
+static void record_preemption(struct intel_engine_execlists *execlists)
+{
+ (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+}
+
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ if (!rq)
+ return 0;
+
+ /* Force a fast reset for terminated contexts (ignoring sysfs!) */
+ if (unlikely(intel_context_is_banned(rq->context)))
+ return 1;
+
+ return READ_ONCE(engine->props.preempt_timeout_ms);
+}
+
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ if (!intel_engine_has_preempt_reset(engine))
+ return;
+
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine, rq));
+}
+
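+/*
+ * Never report a sentinel request as completed when scanning the active
+ * ports, so that nothing is coalesced in behind it.
+ */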
+static bool completed(const struct i915_request *rq)
+{
+ if (i915_request_has_sentinel(rq))
+ return false;
+
+ return __i915_request_is_complete(rq);
+}
+
+static void execlists_dequeue(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request **port = execlists->pending;
+ struct i915_request ** const last_port = port + execlists->port_mask;
+ struct i915_request *last, * const *active;
+ struct virtual_engine *ve;
+ struct rb_node *rb;
+ bool submit = false;
+
+ /*
+ * Hardware submission is through 2 ports. Conceptually each port
+ * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
+ * static for a context, and unique to each, so we only execute
+ * requests belonging to a single context from each ring. RING_HEAD
+ * is maintained by the CS in the context image, it marks the place
+ * where it got up to last time, and through RING_TAIL we tell the CS
+ * where we want to execute up to this time.
+ *
+ * In this list the requests are in order of execution. Consecutive
+ * requests from the same context are adjacent in the ringbuffer. We
+ * can combine these requests into a single RING_TAIL update:
+ *
+ * RING_HEAD...req1...req2
+ * ^- RING_TAIL
+ * since to execute req2 the CS must first execute req1.
+ *
+ * Our goal then is to point each port at the end of a consecutive
+ * sequence of requests, as that is the optimal (fewest wake ups
+ * and context switches) submission.
+ */
+
+ spin_lock(&engine->active.lock);
+
+ /*
+ * If the queue is higher priority than the last
+ * request in the currently active context, submit afresh.
+ * We will resubmit again afterwards in case we need to split
+ * the active context to interject the preemption request,
+ * i.e. we will retrigger preemption following the ack in case
+ * of trouble.
+ */
+ active = execlists->active;
+ while ((last = *active) && completed(last))
+ active++;
+
+ if (last) {
+ if (need_preempt(engine, last)) {
+ ENGINE_TRACE(engine,
+ "preempting last=%llx:%lld, prio=%d, hint=%d\n",
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
+ record_preemption(execlists);
+
+ /*
+ * Don't let the RING_HEAD advance past the breadcrumb
+ * as we unwind (and until we resubmit) so that we do
+ * not accidentally tell it to go backwards.
+ */
+ ring_set_paused(engine, 1);
+
+ /*
+ * Note that we have not stopped the GPU at this point,
+ * so we are unwinding the incomplete requests as they
+ * remain inflight and so by the time we do complete
+ * the preemption, some of the unwound requests may
+ * complete!
+ */
+ __unwind_incomplete_requests(engine);
+
+ last = NULL;
+ } else if (timeslice_expired(engine, last)) {
+ ENGINE_TRACE(engine,
+ "expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
+ yesno(timer_expired(&execlists->timer)),
+ last->fence.context, last->fence.seqno,
+ rq_prio(last),
+ execlists->queue_priority_hint,
+ yesno(timeslice_yield(execlists, last)));
+
+ /*
+ * Consume this timeslice; ensure we start a new one.
+ *
+ * The timeslice expired, and we will unwind the
+ * running contexts and recompute the next ELSP.
+ * If that submit will be the same pair of contexts
+ * (due to dependency ordering), we will skip the
+ * submission. If we don't cancel the timer now,
+ * we will see that the timer has expired and
+ * reschedule the tasklet; continually until the
+ * next context switch or other preemption event.
+ *
+ * Since we have decided to reschedule based on
+ * consumption of this timeslice, if we submit the
+ * same context again, grant it a full timeslice.
+ */
+ cancel_timer(&execlists->timer);
+ ring_set_paused(engine, 1);
+ defer_active(engine);
+
+ /*
+ * Unlike for preemption, if we rewind and continue
+ * executing the same context as previously active,
+ * the order of execution will remain the same and
+ * the tail will only advance. We do not need to
+ * force a full context restore, as a lite-restore
+ * is sufficient to resample the monotonic TAIL.
+ *
+ * If we switch to any other context, similarly we
+ * will not rewind TAIL of current context, and
+ * normal save/restore will preserve state and allow
+ * us to later continue executing the same request.
+ */
+ last = NULL;
+ } else {
+ /*
+ * Otherwise if we already have a request pending
+ * for execution after the current one, we can
+ * just wait until the next CS event before
+ * queuing more. In either case we will force a
+ * lite-restore preemption event, but if we wait
+ * we hopefully coalesce several updates into a single
+ * submission.
+ */
+ if (active[1]) {
+ /*
+ * Even if ELSP[1] is occupied and not worthy
+ * of timeslices, our queue might be.
+ */
+ spin_unlock(&engine->active.lock);
+ return;
+ }
+ }
+ }
+
+ /* XXX virtual is always taking precedence */
+ while ((ve = first_virtual_engine(engine))) {
+ struct i915_request *rq;
+
+ spin_lock(&ve->base.active.lock);
+
+ rq = ve->request;
+ if (unlikely(!virtual_matches(ve, rq, engine)))
+ goto unlock; /* lost the race to a sibling */
+
+ GEM_BUG_ON(rq->engine != &ve->base);
+ GEM_BUG_ON(rq->context != &ve->context);
+
+ if (unlikely(rq_prio(rq) < queue_prio(execlists))) {
+ spin_unlock(&ve->base.active.lock);
+ break;
+ }
+
+ if (last && !can_merge_rq(last, rq)) {
+ spin_unlock(&ve->base.active.lock);
+ spin_unlock(&engine->active.lock);
+ return; /* leave this for another sibling */
+ }
+
+ ENGINE_TRACE(engine,
+ "virtual rq=%llx:%lld%s, new engine? %s\n",
+ rq->fence.context,
+ rq->fence.seqno,
+ __i915_request_is_complete(rq) ? "!" :
+ __i915_request_has_started(rq) ? "*" :
+ "",
+ yesno(engine != ve->siblings[0]));
+
+ WRITE_ONCE(ve->request, NULL);
+ WRITE_ONCE(ve->base.execlists.queue_priority_hint, INT_MIN);
+
+ rb = &ve->nodes[engine->id].rb;
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ GEM_BUG_ON(!(rq->execution_mask & engine->mask));
+ WRITE_ONCE(rq->engine, engine);
+
+ if (__i915_request_submit(rq)) {
+ /*
+ * Only after we confirm that we will submit
+ * this request (i.e. it has not already
+ * completed), do we want to update the context.
+ *
+ * This serves two purposes. It avoids
+ * unnecessary work if we are resubmitting an
+ * already completed request after timeslicing.
+ * But more importantly, it prevents us altering
+ * ve->siblings[] on an idle context, where
+ * we may be using ve->siblings[] in
+ * virtual_context_enter / virtual_context_exit.
+ */
+ virtual_xfer_context(ve, engine);
+ GEM_BUG_ON(ve->siblings[0] != engine);
+
+ submit = true;
+ last = rq;
+ }
+
+ i915_request_put(rq);
+unlock:
+ spin_unlock(&ve->base.active.lock);
+
+ /*
+ * Hmm, we have a bunch of virtual engine requests,
+ * but the first one was already completed (thanks
+ * preempt-to-busy!). Keep looking at the veng queue
+ * until we have no more relevant requests (i.e.
+ * the normal submit queue has higher priority).
+ */
+ if (submit)
+ break;
+ }
+
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ struct i915_request *rq, *rn;
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ bool merge = true;
+
+ /*
+ * Can we combine this request with the current port?
+ * It has to be the same context/ringbuffer and not
+ * have any exceptions (e.g. GVT saying never to
+ * combine contexts).
+ *
+ * If we can combine the requests, we can execute both
+ * by updating the RING_TAIL to point to the end of the
+ * second request, and so we never need to tell the
+ * hardware about the first.
+ */
+ if (last && !can_merge_rq(last, rq)) {
+ /*
+ * If we are on the second port and cannot
+ * combine this request with the last, then we
+ * are done.
+ */
+ if (port == last_port)
+ goto done;
+
+ /*
+ * We must not populate both ELSP[] with the
+ * same LRCA, i.e. we must submit 2 different
+ * contexts if we submit 2 ELSP.
+ */
+ if (last->context == rq->context)
+ goto done;
+
+ if (i915_request_has_sentinel(last))
+ goto done;
+
+ /*
+ * We avoid submitting virtual requests into
+ * the secondary ports so that we can migrate
+ * the request immediately to another engine
+ * rather than wait for the primary request.
+ */
+ if (rq->execution_mask != engine->mask)
+ goto done;
+
+ /*
+ * If GVT overrides us we only ever submit
+ * port[0], leaving port[1] empty. Note that we
+ * also have to be careful that we don't queue
+ * the same context (even though a different
+ * request) to the second port.
+ */
+ if (ctx_single_port_submission(last->context) ||
+ ctx_single_port_submission(rq->context))
+ goto done;
+
+ merge = false;
+ }
+
+ if (__i915_request_submit(rq)) {
+ if (!merge) {
+ *port++ = i915_request_get(last);
+ last = NULL;
+ }
+
+ GEM_BUG_ON(last &&
+ !can_merge_ctx(last->context,
+ rq->context));
+ GEM_BUG_ON(last &&
+ i915_seqno_passed(last->fence.seqno,
+ rq->fence.seqno));
+
+ submit = true;
+ last = rq;
+ }
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+done:
+ *port++ = i915_request_get(last);
+
+ /*
+ * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
+ *
+ * We choose the priority hint such that if we add a request of greater
+ * priority than this, we kick the submission tasklet to decide on
+ * the right order of submitting the requests to hardware. We must
+ * also be prepared to reorder requests as they are in-flight on the
+ * HW. We derive the priority hint then as the first "hole" in
+ * the HW submission ports and if there are no available slots,
+ * the priority of the lowest executing request, i.e. last.
+ *
+ * When we do receive a higher priority request ready to run from the
+ * user, see queue_request(), the priority hint is bumped to that
+ * request triggering preemption on the next dequeue (or subsequent
+ * interrupt for secondary ports).
+ */
+ execlists->queue_priority_hint = queue_prio(execlists);
+ spin_unlock(&engine->active.lock);
+
+ /*
+ * We can skip poking the HW if we ended up with exactly the same set
+ * of requests as currently running, e.g. trying to timeslice a pair
+ * of ordered contexts.
+ */
+ if (submit &&
+ memcmp(active,
+ execlists->pending,
+ (port - execlists->pending) * sizeof(*port))) {
+ *port = NULL;
+ while (port-- != execlists->pending)
+ execlists_schedule_in(*port, port - execlists->pending);
+
+ WRITE_ONCE(execlists->yield, -1);
+ set_preempt_timeout(engine, *active);
+ execlists_submit_ports(engine);
+ } else {
+ ring_set_paused(engine, 0);
+ while (port-- != execlists->pending)
+ i915_request_put(*port);
+ *execlists->pending = NULL;
+ }
+}
+
+static void execlists_dequeue_irq(struct intel_engine_cs *engine)
+{
+ local_irq_disable(); /* Suspend interrupts across request submission */
+ execlists_dequeue(engine);
+ local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
+}
+
+static void clear_ports(struct i915_request **ports, int count)
+{
+ memset_p((void **)ports, NULL, count);
+}
+
+static void
+copy_ports(struct i915_request **dst, struct i915_request **src, int count)
+{
+ /* A memcpy_p() would be very useful here! */
+ while (count--)
+ WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
+}
+
+static struct i915_request **
+cancel_port_requests(struct intel_engine_execlists * const execlists,
+ struct i915_request **inactive)
+{
+ struct i915_request * const *port;
+
+ for (port = execlists->pending; *port; port++)
+ *inactive++ = *port;
+ clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
+
+ /* Mark the end of active before we overwrite *active */
+ for (port = xchg(&execlists->active, execlists->pending); *port; port++)
+ *inactive++ = *port;
+ clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+
+ smp_wmb(); /* complete the seqlock for execlists_active() */
+ WRITE_ONCE(execlists->active, execlists->inflight);
+
+ /* Having cancelled all outstanding process_csb(), stop their timers */
+ GEM_BUG_ON(execlists->pending[0]);
+ cancel_timer(&execlists->timer);
+ cancel_timer(&execlists->preempt);
+
+ return inactive;
+}
+
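+/*
+ * Flush the cachelines backing the first and last CSB entries so that a
+ * subsequent read observes what the GPU has since written rather than a
+ * stale cached copy.
+ */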
+static void invalidate_csb_entries(const u64 *first, const u64 *last)
+{
+ clflush((void *)first);
+ clflush((void *)last);
+}
+
+/*
+ * Starting with Gen12, the status has a new format:
+ *
+ * bit 0: switched to new queue
+ * bit 1: reserved
+ * bit 2: semaphore wait mode (poll or signal), only valid when
+ * switch detail is set to "wait on semaphore"
+ * bits 3-5: engine class
+ * bits 6-11: engine instance
+ * bits 12-14: reserved
+ * bits 15-25: sw context id of the lrc the GT switched to
+ * bits 26-31: sw counter of the lrc the GT switched to
+ * bits 32-35: context switch detail
+ * - 0: ctx complete
+ * - 1: wait on sync flip
+ * - 2: wait on vblank
+ * - 3: wait on scanline
+ * - 4: wait on semaphore
+ * - 5: context preempted (not on SEMAPHORE_WAIT or
+ * WAIT_FOR_EVENT)
+ * bit 36: reserved
+ * bits 37-43: wait detail (for switch detail 1 to 4)
+ * bits 44-46: reserved
+ * bits 47-57: sw context id of the lrc the GT switched away from
+ * bits 58-63: sw counter of the lrc the GT switched away from
+ */
+static bool gen12_csb_parse(const u64 csb)
+{
+ bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb));
+ bool new_queue =
+ lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+
+ /*
+ * The context switch detail is not guaranteed to be 5 when a preemption
+ * occurs, so we can't just check for that. The check below works for
+ * all the cases we care about, including preemptions of WAIT
+ * instructions and lite-restore. Preempt-to-idle via the CTRL register
+ * would require some extra handling, but we don't support that.
+ */
+ if (!ctx_away_valid || new_queue) {
+ GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb)));
+ return true;
+ }
+
+ /*
+ * switch detail = 5 is covered by the case above and we do not expect a
+ * context switch on an unsuccessful wait instruction since we always
+ * use polling mode.
+ */
+ GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb)));
+ return false;
+}
+
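+/*
+ * On gen8-gen11, promotion of the pending set is signalled by either an
+ * idle-to-active transition or a preemption event in the CSB status.
+ */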
+static bool gen8_csb_parse(const u64 csb)
+{
+ return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
+}
+
+static noinline u64
+wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+{
+ u64 entry;
+
+ /*
+ * Reading from the HWSP has one particular advantage: we can detect
+ * a stale entry. Since the write into HWSP is broken, we have no reason
+ * to trust the HW at all; the mmio entry may equally be unordered, so
+ * we prefer the path that is self-checking and, as a last resort,
+ * return the mmio value.
+ *
+ * tgl,dg1:HSDES#22011327657
+ */
+ preempt_disable();
+ if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) {
+ int idx = csb - engine->execlists.csb_status;
+ int status;
+
+ status = GEN8_EXECLISTS_STATUS_BUF;
+ if (idx >= 6) {
+ status = GEN11_EXECLISTS_STATUS_BUF2;
+ idx -= 6;
+ }
+ status += sizeof(u64) * idx;
+
+ entry = intel_uncore_read64(engine->uncore,
+ _MMIO(engine->mmio_base + status));
+ }
+ preempt_enable();
+
+ return entry;
+}
+
+static u64 csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+{
+ u64 entry = READ_ONCE(*csb);
+
+ /*
+ * Unfortunately, the GPU does not always serialise its write
+ * of the CSB entries before its write of the CSB pointer, at least
+ * from the perspective of the CPU, using what is known as a Global
+ * Observation Point. We may read a new CSB tail pointer, but then
+ * read the stale CSB entries, causing us to misinterpret the
+ * context-switch events, and eventually declare the GPU hung.
+ *
+ * icl:HSDES#1806554093
+ * tgl:HSDES#22011248461
+ */
+ if (unlikely(entry == -1))
+ entry = wa_csb_read(engine, csb);
+
+ /* Consume this entry so that we can spot its future reuse. */
+ WRITE_ONCE(*csb, -1);
+
+ /* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */
+ return entry;
+}
+
+static void new_timeslice(struct intel_engine_execlists *el)
+{
+ /* By cancelling, we will start afresh in start_timeslice() */
+ cancel_timer(&el->timer);
+}
+
+static struct i915_request **
+process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ u64 * const buf = execlists->csb_status;
+ const u8 num_entries = execlists->csb_size;
+ struct i915_request **prev;
+ u8 head, tail;
+
+ /*
+ * As we modify our execlists state tracking we require exclusive
+ * access. Either we are inside the tasklet, or the tasklet is disabled
+ * and we assume that is only inside the reset paths and so serialised.
+ */
+ GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
+ !reset_in_progress(execlists));
+ GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
+
+ /*
+ * Note that csb_write, csb_status may be either in HWSP or mmio.
+ * When reading from the csb_write mmio register, we have to be
+ * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
+ * the low 4bits. As it happens we know the next 4bits are always
+ * zero and so we can simply mask off the low u8 of the register
+ * and treat it identically to reading from the HWSP (without having
+ * to use explicit shifting and masking, and probably bifurcating
+ * the code to handle the legacy mmio read).
+ */
+ head = execlists->csb_head;
+ tail = READ_ONCE(*execlists->csb_write);
+ if (unlikely(head == tail))
+ return inactive;
+
+ /*
+ * We will consume all events from HW, or at least pretend to.
+ *
+ * The sequence of events from the HW is deterministic, and derived
+ * from our writes to the ELSP, with a smidgen of variability for
+ * the arrival of the asynchronous requests wrt the inflight
+ * execution. If the HW sends an event that does not correspond with
+ * the one we are expecting, we have to abandon all hope as we lose
+ * all tracking of what the engine is actually executing. We will
+ * only detect we are out of sequence with the HW when we get an
+ * 'impossible' event because we have already drained our own
+ * preemption/promotion queue. If this occurs, we know that we likely
+ * lost track of execution earlier and must unwind and restart; the
+ * simplest way is to stop processing the event queue and force the
+ * engine to reset.
+ */
+ execlists->csb_head = tail;
+ ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
+
+ /*
+ * Hopefully paired with a wmb() in HW!
+ *
+ * We must complete the read of the write pointer before any reads
+ * from the CSB, so that we do not see stale values. Without an rmb
+ * (lfence) the HW may speculatively perform the CSB[] reads *before*
+ * we perform the READ_ONCE(*csb_write).
+ */
+ rmb();
+
+ /* Remember who was last running under the timer */
+ prev = inactive;
+ *prev = NULL;
+
+ do {
+ bool promote;
+ u64 csb;
+
+ if (++head == num_entries)
+ head = 0;
+
+ /*
+ * We are flying near dragons again.
+ *
+ * We hold a reference to the request in execlist_port[]
+ * but no more than that. We are operating in softirq
+ * context and so cannot hold any mutex or sleep. That
+ * prevents us stopping the requests we are processing
+ * in port[] from being retired simultaneously (the
+ * breadcrumb will be complete before we see the
+ * context-switch). As we only hold the reference to the
+ * request, any pointer chasing underneath the request
+ * is subject to a potential use-after-free. Thus we
+ * store all of the bookkeeping within port[] as
+ * required, and avoid using unguarded pointers beneath
+ * request itself. The same applies to the atomic
+ * status notifier.
+ */
+
+ csb = csb_read(engine, buf + head);
+ ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
+ head, upper_32_bits(csb), lower_32_bits(csb));
+
+ if (INTEL_GEN(engine->i915) >= 12)
+ promote = gen12_csb_parse(csb);
+ else
+ promote = gen8_csb_parse(csb);
+ if (promote) {
+ struct i915_request * const *old = execlists->active;
+
+ if (GEM_WARN_ON(!*execlists->pending)) {
+ execlists->error_interrupt |= ERROR_CSB;
+ break;
+ }
+
+ ring_set_paused(engine, 0);
+
+ /* Point active to the new ELSP; prevent overwriting */
+ WRITE_ONCE(execlists->active, execlists->pending);
+ smp_wmb(); /* notify execlists_active() */
+
+ /* cancel old inflight, prepare for switch */
+ trace_ports(execlists, "preempted", old);
+ while (*old)
+ *inactive++ = *old++;
+
+ /* switch pending to inflight */
+ GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
+ copy_ports(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists));
+ smp_wmb(); /* complete the seqlock */
+ WRITE_ONCE(execlists->active, execlists->inflight);
+
+ /* XXX Magic delay for tgl */
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ WRITE_ONCE(execlists->pending[0], NULL);
+ } else {
+ if (GEM_WARN_ON(!*execlists->active)) {
+ execlists->error_interrupt |= ERROR_CSB;
+ break;
+ }
+
+ /* port0 completed, advanced to port1 */
+ trace_ports(execlists, "completed", execlists->active);
+
+ /*
+ * We rely on the hardware being strongly
+ * ordered, that the breadcrumb write is
+ * coherent (visible from the CPU) before the
+ * user interrupt is processed. One might assume
+ * that, as the breadcrumb write comes before the
+ * user interrupt and the CS event for the context
+ * switch, it would therefore be visible before the
+ * CS event itself...
+ */
+ if (GEM_SHOW_DEBUG() &&
+ !__i915_request_is_complete(*execlists->active)) {
+ struct i915_request *rq = *execlists->active;
+ const u32 *regs __maybe_unused =
+ rq->context->lrc_reg_state;
+
+ ENGINE_TRACE(engine,
+ "context completed before request!\n");
+ ENGINE_TRACE(engine,
+ "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
+ ENGINE_READ(engine, RING_START),
+ ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
+ ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_MI_MODE));
+ ENGINE_TRACE(engine,
+ "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ rq->head, rq->tail,
+ rq->fence.context,
+ lower_32_bits(rq->fence.seqno),
+ hwsp_seqno(rq));
+ ENGINE_TRACE(engine,
+ "ctx:{start:%08x, head:%04x, tail:%04x}, ",
+ regs[CTX_RING_START],
+ regs[CTX_RING_HEAD],
+ regs[CTX_RING_TAIL]);
+ }
+
+ *inactive++ = *execlists->active++;
+
+ GEM_BUG_ON(execlists->active - execlists->inflight >
+ execlists_num_ports(execlists));
+ }
+ } while (head != tail);
+
+ /*
+ * Gen11 has proven to fail wrt the global observation point between
+ * entry and tail update, failing on the ordering, and thus
+ * we see an old entry in the context status buffer.
+ *
+ * Forcibly evict the entries ahead of the next GPU CSB update,
+ * to increase the odds that we get fresh entries even with
+ * non-working hardware. The cost of doing so comes out mostly in
+ * the wash, as the hardware, working or not, will need to do the
+ * invalidation beforehand.
+ */
+ invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
+
+ /*
+ * We assume that any event reflects a change in context flow
+ * and merits a fresh timeslice. We reinstall the timer after
+ * inspecting the queue to see if we need to resubmit.
+ */
+ if (*prev != *execlists->active) /* elide lite-restores */
+ new_timeslice(execlists);
+
+ return inactive;
+}
+
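+/* Schedule out the requests collected by process_csb() once the walk is done. */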
+static void post_process_csb(struct i915_request **port,
+ struct i915_request **last)
+{
+ while (port != last)
+ execlists_schedule_out(*port++);
+}
+
+static void __execlists_hold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ if (i915_request_is_active(rq))
+ __i915_request_unsubmit(rq);
+
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+ i915_request_set_hold(rq);
+ RQ_TRACE(rq, "on hold\n");
+
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (__i915_request_is_complete(w))
+ continue;
+
+ if (i915_request_on_hold(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static bool execlists_hold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ if (i915_request_on_hold(rq))
+ return false;
+
+ spin_lock_irq(&engine->active.lock);
+
+ if (__i915_request_is_complete(rq)) { /* too late! */
+ rq = NULL;
+ goto unlock;
+ }
+
+ /*
+ * Transfer this request onto the hold queue to prevent it
+ * being resubmitted to HW (and potentially completed) before we have
+ * released it. Since we may have already submitted following
+ * requests, we need to remove those as well.
+ */
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ GEM_BUG_ON(rq->engine != engine);
+ __execlists_hold(rq);
+ GEM_BUG_ON(list_empty(&engine->active.hold));
+
+unlock:
+ spin_unlock_irq(&engine->active.lock);
+ return rq;
+}
+
+static bool hold_request(const struct i915_request *rq)
+{
+ struct i915_dependency *p;
+ bool result = false;
+
+ /*
+ * If one of our ancestors is on hold, we must also be on hold,
+ * otherwise we will bypass it and execute before it.
+ */
+ rcu_read_lock();
+ for_each_signaler(p, rq) {
+ const struct i915_request *s =
+ container_of(p->signaler, typeof(*s), sched);
+
+ if (s->engine != rq->engine)
+ continue;
+
+ result = i915_request_on_hold(s);
+ if (result)
+ break;
+ }
+ rcu_read_unlock();
+
+ return result;
+}
+
+static void __execlists_unhold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ RQ_TRACE(rq, "hold release\n");
+
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+ GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+
+ i915_request_clear_hold(rq);
+ list_move_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(rq->engine,
+ rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+ /* Also release any children on this engine that are ready */
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ /* Propagate any change in error status */
+ if (rq->fence.error)
+ i915_request_set_error_once(w, rq->fence.error);
+
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_on_hold(w))
+ continue;
+
+ /* Check that no other parents are also on hold */
+ if (hold_request(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static void execlists_unhold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ spin_lock_irq(&engine->active.lock);
+
+ /*
+ * Move this request back to the priority queue, and all of its
+ * children and grandchildren that were suspended along with it.
+ */
+ __execlists_unhold(rq);
+
+ if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
+ engine->execlists.queue_priority_hint = rq_prio(rq);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
+
+ spin_unlock_irq(&engine->active.lock);
+}
+
+struct execlists_capture {
+ struct work_struct work;
+ struct i915_request *rq;
+ struct i915_gpu_coredump *error;
+};
+
+static void execlists_capture_work(struct work_struct *work)
+{
+ struct execlists_capture *cap = container_of(work, typeof(*cap), work);
+ const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+ struct intel_engine_cs *engine = cap->rq->engine;
+ struct intel_gt_coredump *gt = cap->error->gt;
+ struct intel_engine_capture_vma *vma;
+
+ /* Compress all the objects attached to the request, slow! */
+ vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
+ if (vma) {
+ struct i915_vma_compress *compress =
+ i915_vma_capture_prepare(gt);
+
+ intel_engine_coredump_add_vma(gt->engine, vma, compress);
+ i915_vma_capture_finish(gt, compress);
+ }
+
+ gt->simulated = gt->engine->simulated;
+ cap->error->simulated = gt->simulated;
+
+ /* Publish the error state, and announce it to the world */
+ i915_error_state_store(cap->error);
+ i915_gpu_coredump_put(cap->error);
+
+ /* Return this request and all that depend upon it for signaling */
+ execlists_unhold(engine, cap->rq);
+ i915_request_put(cap->rq);
+
+ kfree(cap);
+}
+
+static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
+{
+ const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+ struct execlists_capture *cap;
+
+ cap = kmalloc(sizeof(*cap), gfp);
+ if (!cap)
+ return NULL;
+
+ cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
+ if (!cap->error)
+ goto err_cap;
+
+ cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
+ if (!cap->error->gt)
+ goto err_gpu;
+
+ cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
+ if (!cap->error->gt->engine)
+ goto err_gt;
+
+ cap->error->gt->engine->hung = true;
+
+ return cap;
+
+err_gt:
+ kfree(cap->error->gt);
+err_gpu:
+ kfree(cap->error);
+err_cap:
+ kfree(cap);
+ return NULL;
+}
+
+static struct i915_request *
+active_context(struct intel_engine_cs *engine, u32 ccid)
+{
+ const struct intel_engine_execlists * const el = &engine->execlists;
+ struct i915_request * const *port, *rq;
+
+ /*
+ * Use the most recent result from process_csb(), but just in case
+ * we trigger an error (via interrupt) before the first CS event has
+ * been written, peek at the next submission.
+ */
+
+ for (port = el->active; (rq = *port); port++) {
+ if (rq->context->lrc.ccid == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid:%x found at active:%zd\n",
+ ccid, port - el->active);
+ return rq;
+ }
+ }
+
+ for (port = el->pending; (rq = *port); port++) {
+ if (rq->context->lrc.ccid == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid:%x found at pending:%zd\n",
+ ccid, port - el->pending);
+ return rq;
+ }
+ }
+
+ ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
+ return NULL;
+}
+
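+/*
+ * The CCID of the context currently executing on the engine is reported
+ * in the upper dword of the EXECLIST_STATUS register.
+ */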
+static u32 active_ccid(struct intel_engine_cs *engine)
+{
+ return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
+}
+
+static void execlists_capture(struct intel_engine_cs *engine)
+{
+ struct execlists_capture *cap;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
+ return;
+
+ /*
+ * We need to _quickly_ capture the engine state before we reset.
+ * We are inside an atomic section (softirq) here and we are delaying
+ * the forced preemption event.
+ */
+ cap = capture_regs(engine);
+ if (!cap)
+ return;
+
+ spin_lock_irq(&engine->active.lock);
+ cap->rq = active_context(engine, active_ccid(engine));
+ if (cap->rq) {
+ cap->rq = active_request(cap->rq->context->timeline, cap->rq);
+ cap->rq = i915_request_get_rcu(cap->rq);
+ }
+ spin_unlock_irq(&engine->active.lock);
+ if (!cap->rq)
+ goto err_free;
+
+ /*
+ * Remove the request from the execlists queue, and take ownership
+ * of the request. We pass it to our worker who will _slowly_ compress
+ * all the pages the _user_ requested for debugging their batch, after
+ * which we return it to the queue for signaling.
+ *
+ * By removing them from the execlists queue, we also remove the
+ * requests from being processed by __unwind_incomplete_requests()
+ * during the intel_engine_reset(), and so they will *not* be replayed
+ * afterwards.
+ *
+ * Note that because we have not yet reset the engine at this point,
+ * it is possible that the request we have identified as being
+ * guilty did in fact complete, and we will then hit an arbitration
+ * point allowing the outstanding preemption to succeed. The likelihood
+ * of that is very low (as capturing of the engine registers should be
+ * fast enough to run inside an irq-off atomic section!), so we will
+ * simply hold that request accountable for being non-preemptible
+ * long enough to force the reset.
+ */
+ if (!execlists_hold(engine, cap->rq))
+ goto err_rq;
+
+ INIT_WORK(&cap->work, execlists_capture_work);
+ schedule_work(&cap->work);
+ return;
+
+err_rq:
+ i915_request_put(cap->rq);
+err_free:
+ i915_gpu_coredump_put(cap->error);
+ kfree(cap);
+}
+
+static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
+ if (!intel_has_reset_engine(engine->gt))
+ return;
+
+ if (test_and_set_bit(bit, lock))
+ return;
+
+ ENGINE_TRACE(engine, "reset for %s\n", msg);
+
+ /* Mark this tasklet as disabled to avoid waiting for it to complete */
+ tasklet_disable_nosync(&engine->execlists.tasklet);
+
+ ring_set_paused(engine, 1); /* Freeze the current request in place */
+ execlists_capture(engine);
+ intel_engine_reset(engine, msg);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+}
+
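+/*
+ * A forced preemption timeout only applies while we still have an ELSP
+ * submission pending the HW ack, i.e. the requested preemption has not
+ * yet taken effect.
+ */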
+static bool preempt_timeout(const struct intel_engine_cs *const engine)
+{
+ const struct timer_list *t = &engine->execlists.preempt;
+
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+ return false;
+
+ if (!timer_expired(t))
+ return false;
+
+ return engine->execlists.pending[0];
+}
+
+/*
+ * Check the unread Context Status Buffers and manage the submission of new
+ * contexts to the ELSP accordingly.
+ */
+static void execlists_submission_tasklet(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+ struct i915_request **inactive;
+
+ rcu_read_lock();
+ inactive = process_csb(engine, post);
+ GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
+
+ if (unlikely(preempt_timeout(engine))) {
+ cancel_timer(&engine->execlists.preempt);
+ engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ }
+
+ if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
+ const char *msg;
+
+ /* Generate the error message in priority wrt to the user! */
+ if (engine->execlists.error_interrupt & GENMASK(15, 0))
+ msg = "CS error"; /* thrown by a user payload */
+ else if (engine->execlists.error_interrupt & ERROR_CSB)
+ msg = "invalid CSB event";
+ else if (engine->execlists.error_interrupt & ERROR_PREEMPT)
+ msg = "preemption time out";
+ else
+ msg = "internal error";
+
+ engine->execlists.error_interrupt = 0;
+ execlists_reset(engine, msg);
+ }
+
+ if (!engine->execlists.pending[0]) {
+ execlists_dequeue_irq(engine);
+ start_timeslice(engine);
+ }
+
+ post_process_csb(post, inactive);
+ rcu_read_unlock();
+}
+
+static void __execlists_kick(struct intel_engine_execlists *execlists)
+{
+ /* Kick the tasklet for some interrupt coalescing and reset handling */
+ tasklet_hi_schedule(&execlists->tasklet);
+}
+
+#define execlists_kick(t, member) \
+ __execlists_kick(container_of(t, struct intel_engine_execlists, member))
+
+static void execlists_timeslice(struct timer_list *timer)
+{
+ execlists_kick(timer, timer);
+}
+
+static void execlists_preempt(struct timer_list *timer)
+{
+ execlists_kick(timer, preempt);
+}
+
+static void queue_request(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ list_add_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(engine, rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
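+/*
+ * Only report that the tasklet needs kicking if this request raises the
+ * queue priority hint, i.e. it may preempt the work already queued or
+ * executing.
+ */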
+static bool submit_queue(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+
+ if (rq_prio(rq) <= execlists->queue_priority_hint)
+ return false;
+
+ execlists->queue_priority_hint = rq_prio(rq);
+ return true;
+}
+
+static bool ancestor_on_hold(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ return !list_empty(&engine->active.hold) && hold_request(rq);
+}
+
+static void execlists_submit_request(struct i915_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ if (unlikely(ancestor_on_hold(engine, request))) {
+ RQ_TRACE(request, "ancestor on hold\n");
+ list_add_tail(&request->sched.link, &engine->active.hold);
+ i915_request_set_hold(request);
+ } else {
+ queue_request(engine, request);
+
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(list_empty(&request->sched.link));
+
+ if (submit_queue(engine, request))
+ __execlists_kick(&engine->execlists);
+ }
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+static int execlists_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ return lrc_pre_pin(ce, ce->engine, ww, vaddr);
+}
+
+static int execlists_context_pin(struct intel_context *ce, void *vaddr)
+{
+ return lrc_pin(ce, ce->engine, vaddr);
+}
+
+static int execlists_context_alloc(struct intel_context *ce)
+{
+ return lrc_alloc(ce, ce->engine);
+}
+
+static const struct intel_context_ops execlists_context_ops = {
+ .flags = COPS_HAS_INFLIGHT,
+
+ .alloc = execlists_context_alloc,
+
+ .pre_pin = execlists_context_pre_pin,
+ .pin = execlists_context_pin,
+ .unpin = lrc_unpin,
+ .post_unpin = lrc_post_unpin,
+
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
+ .reset = lrc_reset,
+ .destroy = lrc_destroy,
+};
+
+static int emit_pdps(struct i915_request *rq)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
+ int err, i;
+ u32 *cs;
+
+ GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
+
+ /*
+ * Beware ye of the dragons, this sequence is magic!
+ *
+ * Small changes to this sequence can cause anything from
+ * GPU hangs to forcewake errors and machine lockups!
+ */
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Flush any residual operations from the context load */
+ err = engine->emit_flush(rq, EMIT_FLUSH);
+ if (err)
+ return err;
+
+ /* Magic required to prevent forcewake errors! */
+ err = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ return err;
+
+ cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Ensure the LRI have landed before we invalidate & continue */
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+ for (i = GEN8_3LVL_PDPES; i--; ) {
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+ u32 base = engine->mmio_base;
+
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
+ *cs++ = lower_32_bits(pd_daddr);
+ }
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int execlists_request_alloc(struct i915_request *request)
+{
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
+
+ /*
+ * Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ request->reserved_space += EXECLISTS_REQUEST_SIZE;
+
+ /*
+ * Note that after this point, we have committed to using
+ * this request as it is being used to both track the
+ * state of engine initialisation and liveness of the
+ * golden renderstate above. Think twice before you try
+ * to cancel/unwind this request now.
+ */
+
+ if (!i915_vm_is_4lvl(request->context->vm)) {
+ ret = emit_pdps(request);
+ if (ret)
+ return ret;
+ }
+
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ if (ret)
+ return ret;
+
+ request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+ return 0;
+}
+
+static void reset_csb_pointers(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ const unsigned int reset_value = execlists->csb_size - 1;
+
+ ring_set_paused(engine, 0);
+
+ /*
+ * Sometimes Icelake forgets to reset its pointers on a GPU reset.
+ * Bludgeon them with a mmio update to be sure.
+ */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ 0xffff << 16 | reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ /*
+ * After a reset, the HW starts writing into CSB entry [0]. We
+ * therefore have to set our HEAD pointer back one entry so that
+ * the *first* entry we check is entry 0. To complicate this further,
+ * as we don't wait for the first interrupt after reset, we have to
+ * fake the HW write to point back to the last entry so that our
+ * inline comparison of our cached head position against the last HW
+ * write works even before the first interrupt.
+ */
+ execlists->csb_head = reset_value;
+ WRITE_ONCE(*execlists->csb_write, reset_value);
+ wmb(); /* Make sure this is visible to HW (paranoia?) */
+
+ /* Check that the GPU does indeed update the CSB entries! */
+ memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
+ invalidate_csb_entries(&execlists->csb_status[0],
+ &execlists->csb_status[reset_value]);
+
+ /* Once more for luck and our trusty paranoia */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ 0xffff << 16 | reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
+}
+
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
+}
+
+static void execlists_sanitize(struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(execlists_active(&engine->execlists));
+
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
+ *
+ * We have to assume that across suspend/resume (or other loss
+ * of control) the contents of our pinned buffers have been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
+ */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+ reset_csb_pointers(engine);
+
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ sanitize_hwsp(engine);
+
+ /* And scrub the dirty cachelines for the HWSP */
+ clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
+static void enable_error_interrupt(struct intel_engine_cs *engine)
+{
+ u32 status;
+
+ engine->execlists.error_interrupt = 0;
+ ENGINE_WRITE(engine, RING_EMR, ~0u);
+ ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
+
+ status = ENGINE_READ(engine, RING_ESR);
+ if (unlikely(status)) {
+ drm_err(&engine->i915->drm,
+ "engine '%s' resumed still in error: %08x\n",
+ engine->name, status);
+ __intel_gt_reset(engine->gt, engine->mask);
+ }
+
+ /*
+ * On current gen8+, we have 2 signals to play with
+ *
+ * - I915_ERROR_INSTRUCTION (bit 0)
+ *
+ * Generate an error if the command parser encounters an invalid
+ * instruction
+ *
+ * This is a fatal error.
+ *
+ * - CP_PRIV (bit 2)
+ *
+ * Generate an error on privilege violation (where the CP replaces
+ * the instruction with a no-op). This also fires for writes into
+ * read-only scratch pages.
+ *
+ * This is a non-fatal error, parsing continues.
+ *
+ * * there are a few others defined for odd HW that we do not use
+ *
+ * Since CP_PRIV fires for cases where we have chosen to ignore the
+ * error (as the HW is validating and suppressing the mistakes), we
+ * only unmask the instruction error bit.
+ */
+ ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
+}
+
+static void enable_execlists(struct intel_engine_cs *engine)
+{
+ u32 mode;
+
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
+ intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
+
+ if (INTEL_GEN(engine->i915) >= 11)
+ mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
+ else
+ mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
+ ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
+
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+
+ ENGINE_WRITE_FW(engine,
+ RING_HWS_PGA,
+ i915_ggtt_offset(engine->status_page.vma));
+ ENGINE_POSTING_READ(engine, RING_HWS_PGA);
+
+ enable_error_interrupt(engine);
+}
+
+static bool unexpected_starting_state(struct intel_engine_cs *engine)
+{
+ bool unexpected = false;
+
+ if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
+ drm_dbg(&engine->i915->drm,
+ "STOP_RING still set in RING_MI_MODE\n");
+ unexpected = true;
+ }
+
+ return unexpected;
+}
+
+static int execlists_resume(struct intel_engine_cs *engine)
+{
+ intel_mocs_init_engine(engine);
+
+ intel_breadcrumbs_reset(engine->breadcrumbs);
+
+ if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ intel_engine_dump(engine, &p, NULL);
+ }
+
+ enable_execlists(engine);
+
+ return 0;
+}
+
+static void execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ ENGINE_TRACE(engine, "depth<-%d\n",
+ atomic_read(&execlists->tasklet.count));
+
+ /*
+ * Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its execlists->tasklet *just* as we are
+ * calling engine->resume() and also writing the ELSP.
+ * Turning off the execlists->tasklet until the reset is over
+ * prevents the race.
+ */
+ __tasklet_disable_sync_once(&execlists->tasklet);
+ GEM_BUG_ON(!reset_in_progress(execlists));
+
+ /*
+ * We stop the engines, otherwise we might get a failed reset and a
+ * dead gpu (on elk). Also, a gpu as modern as kbl can suffer
+ * from a system hang if a batchbuffer is progressing when
+ * the reset is issued, regardless of the READY_TO_RESET ack.
+ * Thus assume it is best to stop engines on all gens
+ * where we have a gpu reset.
+ *
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ ring_set_paused(engine, 1);
+ intel_engine_stop_cs(engine);
+
+ engine->execlists.reset_ccid = active_ccid(engine);
+}
+
+static struct i915_request **
+reset_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ mb(); /* paranoia: read the CSB pointers from after the reset */
+ clflush(execlists->csb_write);
+ mb();
+
+ inactive = process_csb(engine, inactive); /* drain preemption events */
+
+ /* Following the reset, we need to reload the CSB read/write pointers */
+ reset_csb_pointers(engine);
+
+ return inactive;
+}
+
+static void
+execlists_reset_active(struct intel_engine_cs *engine, bool stalled)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ u32 head;
+
+ /*
+ * Save the currently executing context, even if we completed
+ * its request, it was still running at the time of the
+ * reset and will have been clobbered.
+ */
+ rq = active_context(engine, engine->execlists.reset_ccid);
+ if (!rq)
+ return;
+
+ ce = rq->context;
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
+
+ if (__i915_request_is_complete(rq)) {
+ /* Idle context; tidy up the ring so we can restart afresh */
+ head = intel_ring_wrap(ce->ring, rq->tail);
+ goto out_replay;
+ }
+
+ /* We still have requests in-flight; the engine should be active */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ /* Context has requests still in-flight; it should not be idle! */
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+
+ rq = active_request(ce->timeline, rq);
+ head = intel_ring_wrap(ce->ring, rq->head);
+ GEM_BUG_ON(head == ce->ring->tail);
+
+ /*
+ * If this request hasn't started yet, e.g. it is waiting on a
+ * semaphore, we need to avoid skipping the request or else we
+ * break the signaling chain. However, if the context is corrupt
+ * the request will not restart and we will be stuck with a wedged
+ * device. It is quite often the case that if we issue a reset
+ * while the GPU is loading the context image, the context
+ * image becomes corrupt.
+ *
+ * Otherwise, if we have not started yet, the request should replay
+ * perfectly and we do not need to flag the result as being erroneous.
+ */
+ if (!__i915_request_has_started(rq))
+ goto out_replay;
+
+ /*
+ * If the request was innocent, we leave the request in the ELSP
+ * and will try to replay it on restarting. The context image may
+ * have been corrupted by the reset, in which case we may have
+ * to service a new GPU hang, but more likely we can continue on
+ * without impact.
+ *
+ * If the request was guilty, we presume the context is corrupt
+ * and have to at least restore the RING register in the context
+ * image back to the expected values to skip over the guilty request.
+ */
+ __i915_request_reset(rq, stalled);
+
+ /*
+ * We want a simple context + ring to execute the breadcrumb update.
+ * We cannot rely on the context being intact across the GPU hang,
+ * so clear it and rebuild just what we need for the breadcrumb.
+ * All pending requests for this context will be zapped, and any
+ * future request will be after userspace has had the opportunity
+ * to recreate its own state.
+ */
+out_replay:
+ ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
+ head, ce->ring->tail);
+ lrc_reset_regs(ce, engine);
+ ce->lrc.lrca = lrc_update_regs(ce, engine, head);
+}
+
+static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+ struct i915_request **inactive;
+
+ rcu_read_lock();
+ inactive = reset_csb(engine, post);
+
+ execlists_reset_active(engine, true);
+
+ inactive = cancel_port_requests(execlists, inactive);
+ post_process_csb(post, inactive);
+ rcu_read_unlock();
+}
+
+static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
+{
+ unsigned long flags;
+
+ ENGINE_TRACE(engine, "\n");
+
+ /* Process the csb, find the guilty context and throw away */
+ execlists_reset_csb(engine, stalled);
+
+ /* Push back any incomplete requests for replay after the reset. */
+ rcu_read_lock();
+ spin_lock_irqsave(&engine->active.lock, flags);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ rcu_read_unlock();
+}
+
+static void nop_submission_tasklet(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+
+ /* The driver is wedged; don't process any more events. */
+ WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
+}
+
+static void execlists_reset_cancel(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq, *rn;
+ struct rb_node *rb;
+ unsigned long flags;
+
+ ENGINE_TRACE(engine, "\n");
+
+ /*
+ * Before we call engine->cancel_requests(), we should have exclusive
+ * access to the submission state. This is arranged for us by the
+ * caller disabling the interrupt generation, the tasklet and other
+ * threads that may then access the same state, giving us a free hand
+ * to reset state. However, we still need to let lockdep be aware that
+ * we know this state may be accessed in hardirq context, so we
+ * disable the irq around this manipulation and we want to keep
+ * the spinlock focused on its duties and not accidentally conflate
+ * coverage to the submission's irq state. (Similarly, although we
+ * shouldn't need to disable irq around the manipulation of the
+ * submission's irq state, we also wish to remind ourselves that
+ * it is irq state.)
+ */
+ execlists_reset_csb(engine, true);
+
+ rcu_read_lock();
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ /* Mark all executing requests as skipped. */
+ list_for_each_entry(rq, &engine->active.requests, sched.link)
+ i915_request_mark_eio(rq);
+ intel_engine_signal_breadcrumbs(engine);
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ i915_request_mark_eio(rq);
+ __i915_request_submit(rq);
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+
+ /* On-hold requests will be flushed to timeline upon their release */
+ list_for_each_entry(rq, &engine->active.hold, sched.link)
+ i915_request_mark_eio(rq);
+
+ /* Cancel all attached virtual engines */
+ while ((rb = rb_first_cached(&execlists->virtual))) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ spin_lock(&ve->base.active.lock);
+ rq = fetch_and_zero(&ve->request);
+ if (rq) {
+ i915_request_mark_eio(rq);
+
+ rq->engine = engine;
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ }
+ spin_unlock(&ve->base.active.lock);
+ }
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ execlists->queue_priority_hint = INT_MIN;
+ execlists->queue = RB_ROOT_CACHED;
+
+ GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
+ execlists->tasklet.func = nop_submission_tasklet;
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ rcu_read_unlock();
+}
+
+static void execlists_reset_finish(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ /*
+ * After a GPU reset, we may have requests to replay. Do so now while
+ * we still have the forcewake to be sure that the GPU is not allowed
+ * to sleep before we restart and reload a context.
+ *
+ * If the GPU reset fails, the engine may still be alive with requests
+ * inflight. We expect those to complete, or for the device to be
+ * reset as the next level of recovery, and as a final resort we
+ * will declare the device wedged.
+ */
+ GEM_BUG_ON(!reset_in_progress(execlists));
+
+ /* And kick in case we missed a new request submission. */
+ if (__tasklet_enable(&execlists->tasklet))
+ __execlists_kick(execlists);
+
+ ENGINE_TRACE(engine, "depth->%d\n",
+ atomic_read(&execlists->tasklet.count));
+}
+
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ ENGINE_POSTING_READ(engine, RING_IMR);
+}
+
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
+}
+
+static void execlists_park(struct intel_engine_cs *engine)
+{
+ cancel_timer(&engine->execlists.timer);
+ cancel_timer(&engine->execlists.preempt);
+}
+
+static bool can_preempt(struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(engine->i915) > 8)
+ return true;
+
+ /* GPGPU on bdw requires extra w/a; not implemented */
+ return engine->class != RENDER_CLASS;
+}
+
+static void execlists_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = execlists_submit_request;
+ engine->schedule = i915_schedule;
+ engine->execlists.tasklet.func = execlists_submission_tasklet;
+
+ engine->reset.prepare = execlists_reset_prepare;
+ engine->reset.rewind = execlists_reset_rewind;
+ engine->reset.cancel = execlists_reset_cancel;
+ engine->reset.finish = execlists_reset_finish;
+
+ engine->park = execlists_park;
+ engine->unpark = NULL;
+
+ engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+ if (!intel_vgpu_active(engine->i915)) {
+ engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ if (can_preempt(engine)) {
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ }
+ }
+
+ if (intel_engine_has_preemption(engine))
+ engine->emit_bb_start = gen8_emit_bb_start;
+ else
+ engine->emit_bb_start = gen8_emit_bb_start_noarb;
+}
+
+static void execlists_shutdown(struct intel_engine_cs *engine)
+{
+ /* Synchronise with residual timers and any softirq they raise */
+ del_timer_sync(&engine->execlists.timer);
+ del_timer_sync(&engine->execlists.preempt);
+ tasklet_kill(&engine->execlists.tasklet);
+}
+
+static void execlists_release(struct intel_engine_cs *engine)
+{
+ engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
+ execlists_shutdown(engine);
+
+ intel_engine_cleanup_common(engine);
+ lrc_fini_wa_ctx(engine);
+}
+
+static void
+logical_ring_default_vfuncs(struct intel_engine_cs *engine)
+{
+ /* Default vfuncs which can be overridden by each engine. */
+
+ engine->resume = execlists_resume;
+
+ engine->cops = &execlists_context_ops;
+ engine->request_alloc = execlists_request_alloc;
+
+ engine->emit_flush = gen8_emit_flush_xcs;
+ engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
+ if (INTEL_GEN(engine->i915) >= 12) {
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
+ engine->emit_flush = gen12_emit_flush_xcs;
+ }
+ engine->set_default_submission = execlists_set_default_submission;
+
+ if (INTEL_GEN(engine->i915) < 11) {
+ engine->irq_enable = gen8_logical_ring_enable_irq;
+ engine->irq_disable = gen8_logical_ring_disable_irq;
+ } else {
+ /*
+ * TODO: On Gen11 interrupt masks need to be clear
+ * to allow C6 entry. Keep interrupts enabled and
+ * take the hit of generating extra interrupts
+ * until a more refined solution exists.
+ */
+ }
+}
+
+static void logical_ring_default_irqs(struct intel_engine_cs *engine)
+{
+ unsigned int shift = 0;
+
+ if (INTEL_GEN(engine->i915) < 11) {
+ const u8 irq_shifts[] = {
+ [RCS0] = GEN8_RCS_IRQ_SHIFT,
+ [BCS0] = GEN8_BCS_IRQ_SHIFT,
+ [VCS0] = GEN8_VCS0_IRQ_SHIFT,
+ [VCS1] = GEN8_VCS1_IRQ_SHIFT,
+ [VECS0] = GEN8_VECS_IRQ_SHIFT,
+ };
+
+ shift = irq_shifts[engine->id];
+ }
+
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+ engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
+}
+
+static void rcs_submission_override(struct intel_engine_cs *engine)
+{
+ switch (INTEL_GEN(engine->i915)) {
+ case 12:
+ engine->emit_flush = gen12_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+ break;
+ case 11:
+ engine->emit_flush = gen11_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
+ break;
+ default:
+ engine->emit_flush = gen8_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+ break;
+ }
+}
+
+int intel_execlists_submission_setup(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
+ u32 base = engine->mmio_base;
+
+ tasklet_init(&engine->execlists.tasklet,
+ execlists_submission_tasklet, (unsigned long)engine);
+ timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
+ timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
+
+ logical_ring_default_vfuncs(engine);
+ logical_ring_default_irqs(engine);
+
+ if (engine->class == RENDER_CLASS)
+ rcs_submission_override(engine);
+
+ lrc_init_wa_ctx(engine);
+
+ if (HAS_LOGICAL_RING_ELSQ(i915)) {
+ execlists->submit_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
+ execlists->ctrl_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
+ } else {
+ execlists->submit_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_ELSP(base));
+ }
+
+ execlists->csb_status =
+ (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
+
+ execlists->csb_write =
+ &engine->status_page.addr[intel_hws_csb_write_index(i915)];
+
+ if (INTEL_GEN(i915) < 11)
+ execlists->csb_size = GEN8_CSB_ENTRIES;
+ else
+ execlists->csb_size = GEN11_CSB_ENTRIES;
+
+ engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
+ if (INTEL_GEN(engine->i915) >= 11) {
+ execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
+ execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
+ }
+
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->sanitize = execlists_sanitize;
+ engine->release = execlists_release;
+
+ return 0;
+}
+
+static struct list_head *virtual_queue(struct virtual_engine *ve)
+{
+ return &ve->base.execlists.default_priolist.requests[0];
+}
+
+static void rcu_virtual_context_destroy(struct work_struct *wrk)
+{
+ struct virtual_engine *ve =
+ container_of(wrk, typeof(*ve), rcu.work);
+ unsigned int n;
+
+ GEM_BUG_ON(ve->context.inflight);
+
+ /* Preempt-to-busy may leave a stale request behind. */
+ if (unlikely(ve->request)) {
+ struct i915_request *old;
+
+ spin_lock_irq(&ve->base.active.lock);
+
+ old = fetch_and_zero(&ve->request);
+ if (old) {
+ GEM_BUG_ON(!__i915_request_is_complete(old));
+ __i915_request_submit(old);
+ i915_request_put(old);
+ }
+
+ spin_unlock_irq(&ve->base.active.lock);
+ }
+
+ /*
+ * Flush the tasklet in case it is still running on another core.
+ *
+ * This needs to be done before we remove ourselves from the siblings'
+ * rbtrees because, if it is running in parallel, it may reinsert
+ * the rb_node into a sibling.
+ */
+ tasklet_kill(&ve->base.execlists.tasklet);
+
+ /* Decouple ourselves from the siblings, no more access allowed. */
+ for (n = 0; n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = ve->siblings[n];
+ struct rb_node *node = &ve->nodes[sibling->id].rb;
+
+ if (RB_EMPTY_NODE(node))
+ continue;
+
+ spin_lock_irq(&sibling->active.lock);
+
+ /* Detachment is lazily performed in the execlists tasklet */
+ if (!RB_EMPTY_NODE(node))
+ rb_erase_cached(node, &sibling->execlists.virtual);
+
+ spin_unlock_irq(&sibling->active.lock);
+ }
+ GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+
+ lrc_fini(&ve->context);
+ intel_context_fini(&ve->context);
+
+ intel_breadcrumbs_free(ve->base.breadcrumbs);
+ intel_engine_free_request_pool(&ve->base);
+
+ kfree(ve->bonds);
+ kfree(ve);
+}
+
+static void virtual_context_destroy(struct kref *kref)
+{
+ struct virtual_engine *ve =
+ container_of(kref, typeof(*ve), context.ref);
+
+ GEM_BUG_ON(!list_empty(&ve->context.signals));
+
+ /*
+ * When destroying the virtual engine, we have to be aware that
+ * it may still be in use from a hardirq/softirq context causing
+ * the resubmission of a completed request (background completion
+ * due to preempt-to-busy). Before we can free the engine, we need
+ * to flush the submission code and tasklets that are still potentially
+ * accessing the engine. Flushing the tasklets requires process context,
+ * and since we can guard the resubmit onto the engine with an RCU read
+ * lock, we can delegate the free of the engine to an RCU worker.
+ */
+ INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
+ queue_rcu_work(system_wq, &ve->rcu);
+}
+
+static void virtual_engine_initial_hint(struct virtual_engine *ve)
+{
+ int swp;
+
+ /*
+ * Pick a random sibling on starting to help spread the load around.
+ *
+ * New contexts are typically created with exactly the same order
+ * of siblings, and often started in batches. Due to the way we iterate
+ * the array of siblings when submitting requests, sibling[0] is
+ * prioritised for dequeuing. If we make sure that sibling[0] is fairly
+ * randomised across the system, we also help spread the load by the
+ * first engine we inspect being different each time.
+ *
+ * NB This does not force us to execute on this engine, it will just
+ * typically be the first we inspect for submission.
+ */
+ swp = prandom_u32_max(ve->num_siblings);
+ if (swp)
+ swap(ve->siblings[swp], ve->siblings[0]);
+}
+
+static int virtual_context_alloc(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ return lrc_alloc(ce, ve->siblings[0]);
+}
+
+static int virtual_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ /* Note: we must use a real engine class for setting up reg state */
+ return lrc_pre_pin(ce, ve->siblings[0], ww, vaddr);
+}
+
+static int virtual_context_pin(struct intel_context *ce, void *vaddr)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ return lrc_pin(ce, ve->siblings[0], vaddr);
+}
+
+static void virtual_context_enter(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ unsigned int n;
+
+ for (n = 0; n < ve->num_siblings; n++)
+ intel_engine_pm_get(ve->siblings[n]);
+
+ intel_timeline_enter(ce->timeline);
+}
+
+static void virtual_context_exit(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ unsigned int n;
+
+ intel_timeline_exit(ce->timeline);
+
+ for (n = 0; n < ve->num_siblings; n++)
+ intel_engine_pm_put(ve->siblings[n]);
+}
+
+static const struct intel_context_ops virtual_context_ops = {
+ .flags = COPS_HAS_INFLIGHT,
+
+ .alloc = virtual_context_alloc,
+
+ .pre_pin = virtual_context_pre_pin,
+ .pin = virtual_context_pin,
+ .unpin = lrc_unpin,
+ .post_unpin = lrc_post_unpin,
+
+ .enter = virtual_context_enter,
+ .exit = virtual_context_exit,
+
+ .destroy = virtual_context_destroy,
+};
+
+static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
+{
+ struct i915_request *rq;
+ intel_engine_mask_t mask;
+
+ rq = READ_ONCE(ve->request);
+ if (!rq)
+ return 0;
+
+ /* The rq is ready for submission; rq->execution_mask is now stable. */
+ mask = rq->execution_mask;
+ if (unlikely(!mask)) {
+ /* Invalid selection, submit to a random engine in error */
+ i915_request_set_error_once(rq, -ENODEV);
+ mask = ve->siblings[0]->mask;
+ }
+
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
+ rq->fence.context, rq->fence.seqno,
+ mask, ve->base.execlists.queue_priority_hint);
+
+ return mask;
+}
+
+static void virtual_submission_tasklet(unsigned long data)
+{
+ struct virtual_engine * const ve = (struct virtual_engine *)data;
+ const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
+ intel_engine_mask_t mask;
+ unsigned int n;
+
+ rcu_read_lock();
+ mask = virtual_submission_mask(ve);
+ rcu_read_unlock();
+ if (unlikely(!mask))
+ return;
+
+ for (n = 0; n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
+ struct ve_node * const node = &ve->nodes[sibling->id];
+ struct rb_node **parent, *rb;
+ bool first;
+
+ if (!READ_ONCE(ve->request))
+ break; /* already handled by a sibling's tasklet */
+
+ spin_lock_irq(&sibling->active.lock);
+
+ if (unlikely(!(mask & sibling->mask))) {
+ if (!RB_EMPTY_NODE(&node->rb)) {
+ rb_erase_cached(&node->rb,
+ &sibling->execlists.virtual);
+ RB_CLEAR_NODE(&node->rb);
+ }
+
+ goto unlock_engine;
+ }
+
+ if (unlikely(!RB_EMPTY_NODE(&node->rb))) {
+ /*
+ * Cheat and avoid rebalancing the tree if we can
+ * reuse this node in situ.
+ */
+ first = rb_first_cached(&sibling->execlists.virtual) ==
+ &node->rb;
+ if (prio == node->prio || (prio > node->prio && first))
+ goto submit_engine;
+
+ rb_erase_cached(&node->rb, &sibling->execlists.virtual);
+ }
+
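+ /*
+ * Insert this sibling's node into its virtual rbtree, keyed on
+ * priority with the highest priority leftmost so that it is the
+ * first candidate considered on the next dequeue.
+ */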
+ rb = NULL;
+ first = true;
+ parent = &sibling->execlists.virtual.rb_root.rb_node;
+ while (*parent) {
+ struct ve_node *other;
+
+ rb = *parent;
+ other = rb_entry(rb, typeof(*other), rb);
+ if (prio > other->prio) {
+ parent = &rb->rb_left;
+ } else {
+ parent = &rb->rb_right;
+ first = false;
+ }
+ }
+
+ rb_link_node(&node->rb, rb, parent);
+ rb_insert_color_cached(&node->rb,
+ &sibling->execlists.virtual,
+ first);
+
+submit_engine:
+ GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
+ node->prio = prio;
+ if (first && prio > sibling->execlists.queue_priority_hint)
+ tasklet_hi_schedule(&sibling->execlists.tasklet);
+
+unlock_engine:
+ spin_unlock_irq(&sibling->active.lock);
+
+ if (intel_context_inflight(&ve->context))
+ break;
+ }
+}
+
+static void virtual_submit_request(struct i915_request *rq)
+{
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ unsigned long flags;
+
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
+ rq->fence.context,
+ rq->fence.seqno);
+
+ GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
+
+ spin_lock_irqsave(&ve->base.active.lock, flags);
+
+ /* By the time we resubmit a request, it may be completed */
+ if (__i915_request_is_complete(rq)) {
+ __i915_request_submit(rq);
+ goto unlock;
+ }
+
+ if (ve->request) { /* background completion from preempt-to-busy */
+ GEM_BUG_ON(!__i915_request_is_complete(ve->request));
+ __i915_request_submit(ve->request);
+ i915_request_put(ve->request);
+ }
+
+ ve->base.execlists.queue_priority_hint = rq_prio(rq);
+ ve->request = i915_request_get(rq);
+
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+ list_move_tail(&rq->sched.link, virtual_queue(ve));
+
+ tasklet_hi_schedule(&ve->base.execlists.tasklet);
+
+unlock:
+ spin_unlock_irqrestore(&ve->base.active.lock, flags);
+}
+
+static struct ve_bond *
+virtual_find_bond(struct virtual_engine *ve,
+ const struct intel_engine_cs *master)
+{
+ int i;
+
+ for (i = 0; i < ve->num_bonds; i++) {
+ if (ve->bonds[i].master == master)
+ return &ve->bonds[i];
+ }
+
+ return NULL;
+}
+
+static void
+virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
+{
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ intel_engine_mask_t allowed, exec;
+ struct ve_bond *bond;
+
+ allowed = ~to_request(signal)->engine->mask;
+
+ bond = virtual_find_bond(ve, to_request(signal)->engine);
+ if (bond)
+ allowed &= bond->sibling_mask;
+
+ /* Restrict the bonded request to run on only the available engines */
+ exec = READ_ONCE(rq->execution_mask);
+ while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
+ ;
+
+ /* Prevent the master from being re-run on the bonded engines */
+ to_request(signal)->execution_mask &= ~allowed;
+}
+
+struct intel_context *
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
+ unsigned int count)
+{
+ struct virtual_engine *ve;
+ unsigned int n;
+ int err;
+
+ if (count == 0)
+ return ERR_PTR(-EINVAL);
+
+ if (count == 1)
+ return intel_context_create(siblings[0]);
+
+ ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
+ if (!ve)
+ return ERR_PTR(-ENOMEM);
+
+ ve->base.i915 = siblings[0]->i915;
+ ve->base.gt = siblings[0]->gt;
+ ve->base.uncore = siblings[0]->uncore;
+ ve->base.id = -1;
+
+ ve->base.class = OTHER_CLASS;
+ ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
+ ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+ ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+
+ /*
+ * The decision on whether to submit a request using semaphores
+ * depends on the saturated state of the engine. We only compute
+ * this during HW submission of the request, and we need this
+ * state to be globally applied to all requests being submitted
+ * to this engine. Virtual engines encompass more than one physical
+ * engine and so we cannot accurately tell in advance if one of those
+ * engines is already saturated and so cannot afford to use a semaphore
+ * and be pessimized in priority for doing so -- if we are the only
+ * context using semaphores after all other clients have stopped, we
+ * will be starved on the saturated system. Such a global switch for
+ * semaphores is less than ideal, but alas is the current compromise.
+ */
+ ve->base.saturated = ALL_ENGINES;
+
+ snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
+
+ intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
+ intel_engine_init_execlists(&ve->base);
+
+ ve->base.cops = &virtual_context_ops;
+ ve->base.request_alloc = execlists_request_alloc;
+
+ ve->base.schedule = i915_schedule;
+ ve->base.submit_request = virtual_submit_request;
+ ve->base.bond_execute = virtual_bond_execute;
+
+ INIT_LIST_HEAD(virtual_queue(ve));
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ tasklet_init(&ve->base.execlists.tasklet,
+ virtual_submission_tasklet,
+ (unsigned long)ve);
+
+ intel_context_init(&ve->context, &ve->base);
+
+ ve->base.breadcrumbs = intel_breadcrumbs_create(NULL);
+ if (!ve->base.breadcrumbs) {
+ err = -ENOMEM;
+ goto err_put;
+ }
+
+ for (n = 0; n < count; n++) {
+ struct intel_engine_cs *sibling = siblings[n];
+
+ GEM_BUG_ON(!is_power_of_2(sibling->mask));
+ if (sibling->mask & ve->base.mask) {
+ DRM_DEBUG("duplicate %s entry in load balancer\n",
+ sibling->name);
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ /*
+ * The virtual engine implementation is tightly coupled to
+ * the execlists backend -- we push out requests directly
+ * into a tree inside each physical engine. We could support
+ * layering if we handle cloning of the requests and
+ * submitting a copy into each backend.
+ */
+ if (sibling->execlists.tasklet.func !=
+ execlists_submission_tasklet) {
+ err = -ENODEV;
+ goto err_put;
+ }
+
+ GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
+ RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
+
+ ve->siblings[ve->num_siblings++] = sibling;
+ ve->base.mask |= sibling->mask;
+
+ /*
+ * All physical engines must be compatible for their emission
+ * functions (as we build the instructions during request
+ * construction and do not alter them before submission
+ * on the physical engine). We use the engine class as a guide
+ * here, although that could be refined.
+ */
+ if (ve->base.class != OTHER_CLASS) {
+ if (ve->base.class != sibling->class) {
+ DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
+ sibling->class, ve->base.class);
+ err = -EINVAL;
+ goto err_put;
+ }
+ continue;
+ }
+
+ ve->base.class = sibling->class;
+ ve->base.uabi_class = sibling->uabi_class;
+ snprintf(ve->base.name, sizeof(ve->base.name),
+ "v%dx%d", ve->base.class, count);
+ ve->base.context_size = sibling->context_size;
+
+ ve->base.emit_bb_start = sibling->emit_bb_start;
+ ve->base.emit_flush = sibling->emit_flush;
+ ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
+ ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
+ ve->base.emit_fini_breadcrumb_dw =
+ sibling->emit_fini_breadcrumb_dw;
+
+ ve->base.flags = sibling->flags;
+ }
+
+ ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
+
+ virtual_engine_initial_hint(ve);
+ return &ve->context;
+
+err_put:
+ intel_context_put(&ve->context);
+ return ERR_PTR(err);
+}
+
+struct intel_context *
+intel_execlists_clone_virtual(struct intel_engine_cs *src)
+{
+ struct virtual_engine *se = to_virtual_engine(src);
+ struct intel_context *dst;
+
+ dst = intel_execlists_create_virtual(se->siblings,
+ se->num_siblings);
+ if (IS_ERR(dst))
+ return dst;
+
+ if (se->num_bonds) {
+ struct virtual_engine *de = to_virtual_engine(dst->engine);
+
+ de->bonds = kmemdup(se->bonds,
+ sizeof(*se->bonds) * se->num_bonds,
+ GFP_KERNEL);
+ if (!de->bonds) {
+ intel_context_put(dst);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ de->num_bonds = se->num_bonds;
+ }
+
+ return dst;
+}
+
+int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
+ const struct intel_engine_cs *master,
+ const struct intel_engine_cs *sibling)
+{
+ struct virtual_engine *ve = to_virtual_engine(engine);
+ struct ve_bond *bond;
+ int n;
+
+ /* Sanity check the sibling is part of the virtual engine */
+ for (n = 0; n < ve->num_siblings; n++)
+ if (sibling == ve->siblings[n])
+ break;
+ if (n == ve->num_siblings)
+ return -EINVAL;
+
+ bond = virtual_find_bond(ve, master);
+ if (bond) {
+ bond->sibling_mask |= sibling->mask;
+ return 0;
+ }
+
+ bond = krealloc(ve->bonds,
+ sizeof(*bond) * (ve->num_bonds + 1),
+ GFP_KERNEL);
+ if (!bond)
+ return -ENOMEM;
+
+ bond[ve->num_bonds].master = master;
+ bond[ve->num_bonds].sibling_mask = sibling->mask;
+
+ ve->bonds = bond;
+ ve->num_bonds++;
+
+ return 0;
+}
+
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent),
+ unsigned int max)
+{
+ const struct intel_engine_execlists *execlists = &engine->execlists;
+ struct i915_request *rq, *last;
+ unsigned long flags;
+ unsigned int count;
+ struct rb_node *rb;
+
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ last = NULL;
+ count = 0;
+ list_for_each_entry(rq, &engine->active.requests, sched.link) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d executing requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ if (execlists->queue_priority_hint != INT_MIN)
+ drm_printf(m, "\t\tQueue priority hint: %d\n",
+ READ_ONCE(execlists->queue_priority_hint));
+
+ last = NULL;
+ count = 0;
+ for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+ int i;
+
+ priolist_for_each_request(rq, p, i) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d queued requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ last = NULL;
+ count = 0;
+ for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq = READ_ONCE(ve->request);
+
+ if (rq) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d virtual requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
+{
+ return engine->set_default_submission ==
+ execlists_set_default_submission;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_execlists.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
new file mode 100644
index 000000000000..a8fd7adefd82
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#ifndef __INTEL_EXECLISTS_SUBMISSION_H__
+#define __INTEL_EXECLISTS_SUBMISSION_H__
+
+#include <linux/types.h>
+
+struct drm_printer;
+
+struct i915_request;
+struct intel_context;
+struct intel_engine_cs;
+
+enum {
+ INTEL_CONTEXT_SCHEDULE_IN = 0,
+ INTEL_CONTEXT_SCHEDULE_OUT,
+ INTEL_CONTEXT_SCHEDULE_PREEMPTED,
+};
+
+int intel_execlists_submission_setup(struct intel_engine_cs *engine);
+
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent),
+ unsigned int max);
+
+struct intel_context *
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
+ unsigned int count);
+
+struct intel_context *
+intel_execlists_clone_virtual(struct intel_engine_cs *src);
+
+int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
+ const struct intel_engine_cs *master,
+ const struct intel_engine_cs *sibling);
+
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
+
+#endif /* __INTEL_EXECLISTS_SUBMISSION_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index cf94525be2c1..eece0844fbe9 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -101,7 +101,16 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
- return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
+ if (!intel_vtd_active())
+ return false;
+
+ if (IS_GEN(i915, 5) && IS_MOBILE(i915))
+ return true;
+
+ if (IS_GEN(i915, 12))
+ return true; /* XXX DMAR fault reason 7 */
+
+ return false;
}
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
@@ -1050,7 +1059,12 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->do_idle_maps = needs_idle_maps(i915);
+ if (needs_idle_maps(i915)) {
+ drm_notice(&i915->drm,
+ "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
+ ggtt->do_idle_maps = true;
+ }
+
ggtt->vm.insert_page = i915_ggtt_insert_page;
ggtt->vm.insert_entries = i915_ggtt_insert_entries;
ggtt->vm.clear_range = i915_ggtt_clear_range;
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 7fb36b12fe7a..a357bb431815 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -320,13 +320,31 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
fence_write(fence);
}
+static bool fence_is_active(const struct i915_fence_reg *fence)
+{
+ return fence->vma && i915_vma_is_active(fence->vma);
+}
+
static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
- struct i915_fence_reg *fence;
+ struct i915_fence_reg *active = NULL;
+ struct i915_fence_reg *fence, *fn;
- list_for_each_entry(fence, &ggtt->fence_list, link) {
+ list_for_each_entry_safe(fence, fn, &ggtt->fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
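+ /*
+ * Remember the first active fence we skip; if we encounter it
+ * again we have walked the entire list without finding an idle
+ * fence and must settle for an unpinned but active one.
+ */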
+ if (fence == active) /* now seen this fence twice */
+ active = ERR_PTR(-EAGAIN);
+
+ /* Prefer idle fences so we do not have to wait on the GPU */
+ if (active != ERR_PTR(-EAGAIN) && fence_is_active(fence)) {
+ if (!active)
+ active = fence;
+
+ list_move_tail(&fence->link, &ggtt->fence_list);
+ continue;
+ }
+
if (atomic_read(&fence->pin_count))
continue;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 44f1d51e5ae5..d8e1ab412634 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -46,6 +46,8 @@ void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
int intel_gt_init_mmio(struct intel_gt *gt)
{
+ intel_gt_init_clock_frequency(gt);
+
intel_uc_init_mmio(&gt->uc);
intel_sseu_info_init(gt);
@@ -546,8 +548,6 @@ int intel_gt_init(struct intel_gt *gt)
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
- intel_gt_init_clock_frequency(gt);
-
err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
if (err)
goto out_fw;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 104cb30e8c13..06d84cf09570 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -145,7 +145,8 @@ static void pool_retire(struct i915_active *ref)
}
static struct intel_gt_buffer_pool_node *
-node_create(struct intel_gt_buffer_pool *pool, size_t sz)
+node_create(struct intel_gt_buffer_pool *pool, size_t sz,
+ enum i915_map_type type)
{
struct intel_gt *gt = to_gt(pool);
struct intel_gt_buffer_pool_node *node;
@@ -169,12 +170,14 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz)
i915_gem_object_set_readonly(obj);
+ node->type = type;
node->obj = obj;
return node;
}
struct intel_gt_buffer_pool_node *
-intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
+ enum i915_map_type type)
{
struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
struct intel_gt_buffer_pool_node *node;
@@ -191,6 +194,9 @@ intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
if (node->obj->base.size < size)
continue;
+ if (node->type != type)
+ continue;
+
age = READ_ONCE(node->age);
if (!age)
continue;
@@ -205,7 +211,7 @@ intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
rcu_read_unlock();
if (&node->link == list) {
- node = node_create(pool, size);
+ node = node_create(pool, size, type);
if (IS_ERR(node))
return node;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
index 42cbac003e8a..6068f8f1762e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
@@ -15,7 +15,8 @@ struct intel_gt;
struct i915_request;
struct intel_gt_buffer_pool_node *
-intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size);
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
+ enum i915_map_type type);
static inline int
intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
index bcf1658c9633..d8d82c890da8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
@@ -11,10 +11,9 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include "gem/i915_gem_object_types.h"
#include "i915_active_types.h"
-struct drm_i915_gem_object;
-
struct intel_gt_buffer_pool {
spinlock_t lock;
struct list_head cache_list[4];
@@ -31,6 +30,7 @@ struct intel_gt_buffer_pool_node {
struct rcu_head rcu;
};
unsigned long age;
+ enum i915_map_type type;
};
#endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 999079686846..a4242ca8dcd7 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -7,34 +7,146 @@
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
-#define MHZ_12 12000000 /* 12MHz (24MHz/2), 83.333ns */
-#define MHZ_12_5 12500000 /* 12.5MHz (25MHz/2), 80ns */
-#define MHZ_19_2 19200000 /* 19.2MHz, 52.083ns */
+static u32 read_reference_ts_freq(struct intel_uncore *uncore)
+{
+ u32 ts_override = intel_uncore_read(uncore, GEN9_TIMESTAMP_OVERRIDE);
+ u32 base_freq, frac_freq;
+
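+ /*
+ * The override encodes the frequency as an integer number of MHz
+ * plus a fractional part of 1MHz / (denominator + 1).
+ */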
+ base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
+ base_freq *= 1000000;
+
+ frac_freq = ((ts_override &
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
+ frac_freq = 1000000 / (frac_freq + 1);
+
+ return base_freq + frac_freq;
+}
+
+static u32 gen10_get_crystal_clock_freq(struct intel_uncore *uncore,
+ u32 rpm_config_reg)
+{
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
+ u32 crystal_clock =
+ (rpm_config_reg & GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+ GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-static u32 read_clock_frequency(const struct intel_gt *gt)
+ switch (crystal_clock) {
+ case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+ return f19_2_mhz;
+ case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+ return f24_mhz;
+ default:
+ MISSING_CASE(crystal_clock);
+ return 0;
+ }
+}
+
+static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
+ u32 rpm_config_reg)
{
- if (INTEL_GEN(gt->i915) >= 11) {
- u32 config;
-
- config = intel_uncore_read(gt->uncore, RPM_CONFIG0);
- config &= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK;
- config >>= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
- switch (config) {
- case 0: return MHZ_12;
- case 1:
- case 2: return MHZ_19_2;
- default:
- case 3: return MHZ_12_5;
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
+ u32 f25_mhz = 25000000;
+ u32 f38_4_mhz = 38400000;
+ u32 crystal_clock =
+ (rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+ GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+
+ switch (crystal_clock) {
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+ return f24_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+ return f19_2_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
+ return f38_4_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
+ return f25_mhz;
+ default:
+ MISSING_CASE(crystal_clock);
+ return 0;
+ }
+}
+
+static u32 read_clock_frequency(struct intel_uncore *uncore)
+{
+ u32 f12_5_mhz = 12500000;
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
+
+ if (INTEL_GEN(uncore->i915) <= 4) {
+ /*
+ * PRMs say:
+ *
+ * "The value in this register increments once every 16
+ * hclks." (through the “Clocking Configuration”
+ * (“CLKCFG”) MCHBAR register)
+ */
+ return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
+ } else if (INTEL_GEN(uncore->i915) <= 8) {
+ /*
+ * PRMs say:
+ *
+ * "The PCU TSC counts 10ns increments; this timestamp
+ * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
+ * rolling over every 1.5 hours)."
+ */
+ return f12_5_mhz;
+ } else if (INTEL_GEN(uncore->i915) <= 9) {
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ freq = IS_GEN9_LP(uncore->i915) ? f19_2_mhz : f24_mhz;
+
+ /*
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
+ */
+ freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
+ CTC_SHIFT_PARAMETER_SHIFT);
}
- } else if (INTEL_GEN(gt->i915) >= 9) {
- if (IS_GEN9_LP(gt->i915))
- return MHZ_19_2;
- else
- return MHZ_12;
- } else {
- return MHZ_12_5;
+
+ return freq;
+ } else if (INTEL_GEN(uncore->i915) <= 12) {
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ /*
+ * First figure out the reference frequency. There are 2 ways
+ * we can compute the frequency, either through the
+ * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
+ * tells us which one we should use.
+ */
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
+
+ if (INTEL_GEN(uncore->i915) <= 10)
+ freq = gen10_get_crystal_clock_freq(uncore, c0);
+ else
+ freq = gen11_get_crystal_clock_freq(uncore, c0);
+
+ /*
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
+ */
+ freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+ }
+
+ return freq;
}
+
+ MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
+ return 0;
}
void intel_gt_init_clock_frequency(struct intel_gt *gt)
@@ -43,20 +155,27 @@ void intel_gt_init_clock_frequency(struct intel_gt *gt)
* Note that on gen11+, the clock frequency may be reconfigured.
* We do not, and we assume nobody else does.
*/
- gt->clock_frequency = read_clock_frequency(gt);
+ gt->clock_frequency = read_clock_frequency(gt->uncore);
+ if (gt->clock_frequency)
+ gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
+
GT_TRACE(gt,
- "Using clock frequency: %dkHz\n",
- gt->clock_frequency / 1000);
+ "Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
+ gt->clock_frequency / 1000,
+ gt->clock_period_ns,
+ div_u64(mul_u32_u32(gt->clock_period_ns, S32_MAX),
+ USEC_PER_SEC));
+
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void intel_gt_check_clock_frequency(const struct intel_gt *gt)
{
- if (gt->clock_frequency != read_clock_frequency(gt)) {
+ if (gt->clock_frequency != read_clock_frequency(gt->uncore)) {
dev_err(gt->i915->drm.dev,
"GT clock frequency changed, was %uHz, now %uHz!\n",
gt->clock_frequency,
- read_clock_frequency(gt));
+ read_clock_frequency(gt->uncore));
}
}
#endif
@@ -66,26 +185,24 @@ static u64 div_u64_roundup(u64 nom, u32 den)
return div_u64(nom + den - 1, den);
}
-u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count)
+u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
{
- return div_u64_roundup(mul_u32_u32(count, 1000 * 1000 * 1000),
- gt->clock_frequency);
+ return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency);
}
-u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count)
+u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
{
return intel_gt_clock_interval_to_ns(gt, 16 * count);
}
-u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns)
+u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
{
- return div_u64_roundup(mul_u32_u32(gt->clock_frequency, ns),
- 1000 * 1000 * 1000);
+ return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC);
}
-u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns)
+u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
{
- u32 val;
+ u64 val;
/*
* Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
@@ -94,9 +211,9 @@ u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns)
* EI/thresholds are "bad", leading to a very sluggish or even
* frozen machine.
*/
- val = DIV_ROUND_UP(intel_gt_ns_to_clock_interval(gt, ns), 16);
+ val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16);
if (IS_GEN(gt->i915, 6))
- val = roundup(val, 25);
+ val = div_u64_roundup(val, 25) * 25;
return val;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
index f793c89f2cbd..8b03e97a85df 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
@@ -18,10 +18,10 @@ void intel_gt_check_clock_frequency(const struct intel_gt *gt);
static inline void intel_gt_check_clock_frequency(const struct intel_gt *gt) {}
#endif
-u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count);
-u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count);
+u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count);
+u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count);
-u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns);
-u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns);
+u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns);
+u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns);
#endif /* __INTEL_GT_CLOCK_UTILS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 257063a57101..9830342aa6f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -11,6 +11,7 @@
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
+#include "intel_lrc_reg.h"
#include "intel_uncore.h"
#include "intel_rps.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 274aa0dd7050..c94e8ac884eb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -39,6 +39,28 @@ static void user_forcewake(struct intel_gt *gt, bool suspend)
intel_gt_pm_put(gt);
}
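+/*
+ * runtime_begin/runtime_end bracket the GT unpark/park transitions so we can
+ * accumulate the total time the GT has been awake. The seqcount lets readers
+ * (intel_gt_get_awake_time) sample a consistent snapshot without blocking.
+ */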
+static void runtime_begin(struct intel_gt *gt)
+{
+ local_irq_disable();
+ write_seqcount_begin(&gt->stats.lock);
+ gt->stats.start = ktime_get();
+ gt->stats.active = true;
+ write_seqcount_end(&gt->stats.lock);
+ local_irq_enable();
+}
+
+static void runtime_end(struct intel_gt *gt)
+{
+ local_irq_disable();
+ write_seqcount_begin(&gt->stats.lock);
+ gt->stats.active = false;
+ gt->stats.total =
+ ktime_add(gt->stats.total,
+ ktime_sub(ktime_get(), gt->stats.start));
+ write_seqcount_end(&gt->stats.lock);
+ local_irq_enable();
+}
+
static int __gt_unpark(struct intel_wakeref *wf)
{
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
@@ -67,6 +89,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
i915_pmu_gt_unparked(i915);
intel_gt_unpark_requests(gt);
+ runtime_begin(gt);
return 0;
}
@@ -79,6 +102,7 @@ static int __gt_park(struct intel_wakeref *wf)
GT_TRACE(gt, "\n");
+ runtime_end(gt);
intel_gt_park_requests(gt);
i915_vma_parked(gt);
@@ -106,6 +130,7 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_gt_pm_init_early(struct intel_gt *gt)
{
intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
+ seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
void intel_gt_pm_init(struct intel_gt *gt)
@@ -339,6 +364,30 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
return intel_uc_runtime_resume(&gt->uc);
}
+static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt)
+{
+ ktime_t total = gt->stats.total;
+
+ if (gt->stats.active)
+ total = ktime_add(total,
+ ktime_sub(ktime_get(), gt->stats.start));
+
+ return total;
+}
+
+ktime_t intel_gt_get_awake_time(const struct intel_gt *gt)
+{
+ unsigned int seq;
+ ktime_t total;
+
+ do {
+ seq = read_seqcount_begin(&gt->stats.lock);
+ total = __intel_gt_get_awake_time(gt);
+ } while (read_seqcount_retry(&gt->stats.lock, seq));
+
+ return total;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_gt_pm.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 60f0e2fbe55c..63846a856e7e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -58,6 +58,8 @@ int intel_gt_resume(struct intel_gt *gt);
void intel_gt_runtime_suspend(struct intel_gt *gt);
int intel_gt_runtime_resume(struct intel_gt *gt);
+ktime_t intel_gt_get_awake_time(const struct intel_gt *gt);
+
static inline bool is_mock_gt(const struct intel_gt *gt)
{
return I915_SELFTEST_ONLY(gt->awake == -ENODEV);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 66fcbf9d0fdd..dc06c78c9eeb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -135,13 +135,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl, *tn;
unsigned long active_count = 0;
- bool interruptible;
LIST_HEAD(free);
- interruptible = true;
- if (unlikely(timeout < 0))
- timeout = -timeout, interruptible = false;
-
flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
@@ -163,7 +158,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
mutex_unlock(&tl->mutex);
timeout = dma_fence_wait_timeout(fence,
- interruptible,
+ true,
timeout);
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 6d39a4a11bf3..a83d3e18254d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -75,6 +75,7 @@ struct intel_gt {
intel_wakeref_t awake;
u32 clock_frequency;
+ u32 clock_period_ns;
struct intel_llc llc;
struct intel_rc6 rc6;
@@ -87,6 +88,30 @@ struct intel_gt {
u32 pm_guc_events;
+ struct {
+ bool active;
+
+ /**
+ * @lock: Lock protecting the below fields.
+ */
+ seqcount_mutex_t lock;
+
+ /**
+ * @total: Total time the GT has been awake.
+ *
+ * Accumulated time not counting the most recent block in cases
+ * where the GT is currently awake (active).
+ */
+ ktime_t total;
+
+ /**
+ * @start: Timestamp of the last idle to active transition.
+ *
+ * Idle is defined as active == false, awake as active == true.
+ */
+ ktime_t start;
+ } stats;
+
struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
[MAX_ENGINE_INSTANCE + 1];
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 7bfe9072be9a..04aa6601e984 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -422,6 +422,35 @@ void setup_private_pat(struct intel_uncore *uncore)
bdw_setup_private_ppat(uncore);
}
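+/*
+ * Allocate and pin a CPU-cacheable scratch buffer in @vm, sized up to a page
+ * boundary, so that results written by the GPU can be read back on the CPU.
+ */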
+struct i915_vma *
+__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0,
+ i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 8a33940a71f3..29c10fde8ce3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -573,6 +573,9 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm,
void i915_vm_free_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash);
+struct i915_vma *
+__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);
+
static inline struct sgt_dma {
struct scatterlist *sg;
dma_addr_t dma, max;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7614a3d24fca..94f485b591af 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1,599 +1,23 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Ben Widawsky <[email protected]>
- * Michel Thierry <[email protected]>
- * Thomas Daniel <[email protected]>
- * Oscar Mateo <[email protected]>
- *
- */
-
-/**
- * DOC: Logical Rings, Logical Ring Contexts and Execlists
- *
- * Motivation:
- * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
- * These expanded contexts enable a number of new abilities, especially
- * "Execlists" (also implemented in this file).
- *
- * One of the main differences with the legacy HW contexts is that logical
- * ring contexts incorporate many more things to the context's state, like
- * PDPs or ringbuffer control registers:
- *
- * The reason why PDPs are included in the context is straightforward: as
- * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
- * contained there mean you don't need to do a ppgtt->switch_mm yourself,
- * instead, the GPU will do it for you on the context switch.
- *
- * But, what about the ringbuffer control registers (head, tail, etc..)?
- * shouldn't we just need a set of those per engine command streamer? This is
- * where the name "Logical Rings" starts to make sense: by virtualizing the
- * rings, the engine cs shifts to a new "ring buffer" with every context
- * switch. When you want to submit a workload to the GPU you: A) choose your
- * context, B) find its appropriate virtualized ring, C) write commands to it
- * and then, finally, D) tell the GPU to switch to that context.
- *
- * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
- * to a contexts is via a context execution list, ergo "Execlists".
- *
- * LRC implementation:
- * Regarding the creation of contexts, we have:
- *
- * - One global default context.
- * - One local default context for each opened fd.
- * - One local extra context for each context create ioctl call.
- *
- * Now that ringbuffers belong per-context (and not per-engine, like before)
- * and that contexts are uniquely tied to a given engine (and not reusable,
- * like before) we need:
- *
- * - One ringbuffer per-engine inside each context.
- * - One backing object per-engine inside each context.
- *
- * The global default context starts its life with these new objects fully
- * allocated and populated. The local default context for each opened fd is
- * more complex, because we don't know at creation time which engine is going
- * to use them. To handle this, we have implemented a deferred creation of LR
- * contexts:
- *
- * The local context starts its life as a hollow or blank holder, that only
- * gets populated for a given engine once we receive an execbuffer. If later
- * on we receive another execbuffer ioctl for the same context but a different
- * engine, we allocate/populate a new ringbuffer and context backing object and
- * so on.
- *
- * Finally, regarding local contexts created using the ioctl call: as they are
- * only allowed with the render ring, we can allocate & populate them right
- * away (no need to defer anything, at least for now).
- *
- * Execlists implementation:
- * Execlists are the new method by which, on gen8+ hardware, workloads are
- * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
- * This method works as follows:
- *
- * When a request is committed, its commands (the BB start and any leading or
- * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
- * for the appropriate context. The tail pointer in the hardware context is not
- * updated at this time, but instead, kept by the driver in the ringbuffer
- * structure. A structure representing this request is added to a request queue
- * for the appropriate engine: this structure contains a copy of the context's
- * tail after the request was written to the ring buffer and a pointer to the
- * context itself.
- *
- * If the engine's request queue was empty before the request was added, the
- * queue is processed immediately. Otherwise the queue will be processed during
- * a context switch interrupt. In any case, elements on the queue will get sent
- * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
- * globally unique 20-bits submission ID.
- *
- * When execution of a request completes, the GPU updates the context status
- * buffer with a context complete event and generates a context switch interrupt.
- * During the interrupt handling, the driver examines the events in the buffer:
- * for each context complete event, if the announced ID matches that on the head
- * of the request queue, then that request is retired and removed from the queue.
- *
- * After processing, if any requests were retired and the queue is not empty
- * then a new execution list can be submitted. The two requests at the front of
- * the queue are next to be submitted but since a context may not occur twice in
- * an execution list, if subsequent requests have the same ID as the first then
- * the two requests must be combined. This is done simply by discarding requests
- * at the head of the queue until either only one requests is left (in which case
- * we use a NULL second context) or the first two requests have unique IDs.
- *
- * By always executing the first two requests in the queue the driver ensures
- * that the GPU is kept as busy as possible. In the case where a single context
- * completes but a second context is still executing, the request for this second
- * context will be at the head of the queue when we remove the first one. This
- * request will then be resubmitted along with a new request for a different context,
- * which will cause the hardware to continue executing the second request and queue
- * the new request (the GPU detects the condition of a context getting preempted
- * with the same context and optimizes the context switch flow by not doing
- * preemption, but just sampling the new tail pointer).
- *
*/
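As a rough illustration of the pairing rule described in the comment above (requests from the same context at the head of the queue are combined, and an execution list never names the same context twice), here is a small, self-contained C sketch. The struct, helper and values are hypothetical simplifications introduced only for illustration; they are not the driver's types or its actual submission code.

/* Hypothetical, simplified model of ELSP pairing -- not the driver's code. */
#include <stddef.h>
#include <stdio.h>

struct fake_request {
	unsigned int ctx_id;	/* context this request belongs to */
	unsigned int tail;	/* ring tail recorded for this request */
};

/*
 * Pick at most two requests for an execution list.  Consecutive requests
 * from the same context as the head are "combined": only the last tail
 * matters, so the earlier entries are skipped.  Returns the number of
 * ports used (1 means the second context would be submitted as NULL).
 */
static size_t pick_execlist(const struct fake_request *queue, size_t count,
			    const struct fake_request *elsp[2])
{
	size_t i = 0, ports = 0;

	elsp[0] = elsp[1] = NULL;
	if (!count)
		return 0;

	/* merge the run of same-context requests at the head of the queue */
	while (i + 1 < count && queue[i + 1].ctx_id == queue[0].ctx_id)
		i++;
	elsp[ports++] = &queue[i++];

	/* the next request, if any, is guaranteed to use a different context */
	if (i < count)
		elsp[ports++] = &queue[i];

	return ports;
}

int main(void)
{
	const struct fake_request queue[] = {
		{ .ctx_id = 1, .tail = 0x100 },
		{ .ctx_id = 1, .tail = 0x180 },	/* combined with the first */
		{ .ctx_id = 2, .tail = 0x040 },
	};
	const struct fake_request *elsp[2];
	size_t n = pick_execlist(queue, 3, elsp);

	printf("ports used: %zu, ELSP[0] ctx %u tail %#x\n",
	       n, elsp[0]->ctx_id, elsp[0]->tail);
	return 0;
}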
-#include <linux/interrupt.h>
+#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "i915_perf.h"
-#include "i915_trace.h"
-#include "i915_vgpu.h"
-#include "intel_breadcrumbs.h"
-#include "intel_context.h"
-#include "intel_engine_pm.h"
+#include "intel_engine.h"
+#include "intel_gpu_commands.h"
#include "intel_gt.h"
-#include "intel_gt_pm.h"
-#include "intel_gt_requests.h"
+#include "intel_lrc.h"
#include "intel_lrc_reg.h"
-#include "intel_mocs.h"
-#include "intel_reset.h"
#include "intel_ring.h"
-#include "intel_workarounds.h"
#include "shmem_utils.h"
-#define RING_EXECLIST_QFULL (1 << 0x2)
-#define RING_EXECLIST1_VALID (1 << 0x3)
-#define RING_EXECLIST0_VALID (1 << 0x4)
-#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
-#define RING_EXECLIST1_ACTIVE (1 << 0x11)
-#define RING_EXECLIST0_ACTIVE (1 << 0x12)
-
-#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
-#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
-#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
-#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
-#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
-#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
-
-#define GEN8_CTX_STATUS_COMPLETED_MASK \
- (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
-
-#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
-
-#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */
-#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */
-#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15)
-#define GEN12_IDLE_CTX_ID 0x7FF
-#define GEN12_CSB_CTX_VALID(csb_dw) \
- (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
-
-/* Typical size of the average request (2 pipecontrols and a MI_BB) */
-#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
-
-struct virtual_engine {
- struct intel_engine_cs base;
- struct intel_context context;
- struct rcu_work rcu;
-
- /*
- * We allow only a single request through the virtual engine at a time
- * (each request in the timeline waits for the completion fence of
- * the previous before being submitted). By restricting ourselves to
- * only submitting a single request, each request is placed onto a
- * physical engine to maximise load spreading (by virtue of the late greedy
- * scheduling -- each real engine takes the next available request
- * upon idling).
- */
- struct i915_request *request;
-
- /*
- * We keep a rbtree of available virtual engines inside each physical
- * engine, sorted by priority. Here we preallocate the nodes we need
- * for the virtual engine, indexed by physical_engine->id.
- */
- struct ve_node {
- struct rb_node rb;
- int prio;
- } nodes[I915_NUM_ENGINES];
-
- /*
- * Keep track of bonded pairs -- restrictions upon our selection
- * of physical engines any particular request may be submitted to.
- * If we receive a submit-fence from a master engine, we will only
- * use one of sibling_mask physical engines.
- */
- struct ve_bond {
- const struct intel_engine_cs *master;
- intel_engine_mask_t sibling_mask;
- } *bonds;
- unsigned int num_bonds;
-
- /* And finally, which physical engines this virtual engine maps onto. */
- unsigned int num_siblings;
- struct intel_engine_cs *siblings[];
-};
-
-static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
-{
- GEM_BUG_ON(!intel_engine_is_virtual(engine));
- return container_of(engine, struct virtual_engine, base);
-}
-
-static int __execlists_context_alloc(struct intel_context *ce,
- struct intel_engine_cs *engine);
-
-static void execlists_init_reg_state(u32 *reg_state,
- const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- const struct intel_ring *ring,
- bool close);
-static void
-__execlists_update_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- u32 head);
-
-static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x60;
- else if (INTEL_GEN(engine->i915) >= 9)
- return 0x54;
- else if (engine->class == RENDER_CLASS)
- return 0x58;
- else
- return -1;
-}
-
-static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x74;
- else if (INTEL_GEN(engine->i915) >= 9)
- return 0x68;
- else if (engine->class == RENDER_CLASS)
- return 0xd8;
- else
- return -1;
-}
-
-static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x12;
- else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
- return 0x18;
- else
- return -1;
-}
-
-static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
-{
- int x;
-
- x = lrc_ring_wa_bb_per_ctx(engine);
- if (x < 0)
- return x;
-
- return x + 2;
-}
-
-static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
-{
- int x;
-
- x = lrc_ring_indirect_ptr(engine);
- if (x < 0)
- return x;
-
- return x + 2;
-}
-
-static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
-{
- if (engine->class != RENDER_CLASS)
- return -1;
-
- if (INTEL_GEN(engine->i915) >= 12)
- return 0xb6;
- else if (INTEL_GEN(engine->i915) >= 11)
- return 0xaa;
- else
- return -1;
-}
-
-static u32
-lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
-{
- switch (INTEL_GEN(engine->i915)) {
- default:
- MISSING_CASE(INTEL_GEN(engine->i915));
- fallthrough;
- case 12:
- return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 11:
- return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 10:
- return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 9:
- return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 8:
- return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- }
-}
-
-static void
-lrc_ring_setup_indirect_ctx(u32 *regs,
- const struct intel_engine_cs *engine,
- u32 ctx_bb_ggtt_addr,
- u32 size)
-{
- GEM_BUG_ON(!size);
- GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
- GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
- regs[lrc_ring_indirect_ptr(engine) + 1] =
- ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
-
- GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
- regs[lrc_ring_indirect_offset(engine) + 1] =
- lrc_ring_indirect_offset_default(engine) << 6;
-}
-
-static u32 intel_context_get_runtime(const struct intel_context *ce)
-{
- /*
- * We can use either ppHWSP[16] which is recorded before the context
- * switch (and so excludes the cost of context switches) or use the
- * value from the context image itself, which is saved/restored earlier
- * and so includes the cost of the save.
- */
- return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
-}
-
-static void mark_eio(struct i915_request *rq)
-{
- if (i915_request_completed(rq))
- return;
-
- GEM_BUG_ON(i915_request_signaled(rq));
-
- i915_request_set_error_once(rq, -EIO);
- i915_request_mark_complete(rq);
-}
-
-static struct i915_request *
-active_request(const struct intel_timeline * const tl, struct i915_request *rq)
-{
- struct i915_request *active = rq;
-
- rcu_read_lock();
- list_for_each_entry_continue_reverse(rq, &tl->requests, link) {
- if (i915_request_completed(rq))
- break;
-
- active = rq;
- }
- rcu_read_unlock();
-
- return active;
-}
-
-static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
-{
- return (i915_ggtt_offset(engine->status_page.vma) +
- I915_GEM_HWS_PREEMPT_ADDR);
-}
-
-static inline void
-ring_set_paused(const struct intel_engine_cs *engine, int state)
-{
- /*
- * We inspect HWS_PREEMPT with a semaphore inside
- * engine->emit_fini_breadcrumb. If the dword is true,
- * the ring is paused as the semaphore will busywait
- * until the dword is false.
- */
- engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
- if (state)
- wmb();
-}
-
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
-{
- return rb_entry(rb, struct i915_priolist, node);
-}
-
-static inline int rq_prio(const struct i915_request *rq)
-{
- return READ_ONCE(rq->sched.attr.priority);
-}
-
-static int effective_prio(const struct i915_request *rq)
-{
- int prio = rq_prio(rq);
-
- /*
- * If this request is special and must not be interrupted at any
- * cost, so be it. Note we are only checking the most recent request
- * in the context and so may be masking an earlier vip request. It
- * is hoped that under the conditions where nopreempt is used, this
- * will not matter (i.e. all requests to that context will be
- * nopreempt for as long as desired).
- */
- if (i915_request_has_nopreempt(rq))
- prio = I915_PRIORITY_UNPREEMPTABLE;
-
- return prio;
-}
-
-static int queue_prio(const struct intel_engine_execlists *execlists)
-{
- struct i915_priolist *p;
- struct rb_node *rb;
-
- rb = rb_first_cached(&execlists->queue);
- if (!rb)
- return INT_MIN;
-
- /*
- * As the priolist[] is inverted, with the highest priority in [0],
- * we have to flip the index value to recover the priority.
- */
- p = to_priolist(rb);
- if (!I915_USER_PRIORITY_SHIFT)
- return p->priority;
-
- return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
-}
-
-static inline bool need_preempt(const struct intel_engine_cs *engine,
- const struct i915_request *rq,
- struct rb_node *rb)
-{
- int last_prio;
-
- if (!intel_engine_has_semaphores(engine))
- return false;
-
- /*
- * Check if the current priority hint merits a preemption attempt.
- *
- * We record the highest value priority we saw during rescheduling
- * prior to this dequeue, therefore we know that if it is strictly
- * less than the current tail of ELSP[0], we do not need to force
- * a preempt-to-idle cycle.
- *
- * However, the priority hint is a mere hint that we may need to
- * preempt. If that hint is stale or we may be trying to preempt
- * ourselves, ignore the request.
- *
- * More naturally we would write
- * prio >= max(0, last);
- * except that we wish to prevent triggering preemption at the same
- * priority level: the task that is running should remain running
- * to preserve FIFO ordering of dependencies.
- */
- last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
- if (engine->execlists.queue_priority_hint <= last_prio)
- return false;
-
- /*
- * Check against the first request in ELSP[1], it will, thanks to the
- * power of PI, be the highest priority of that context.
- */
- if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
- rq_prio(list_next_entry(rq, sched.link)) > last_prio)
- return true;
-
- if (rb) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- bool preempt = false;
-
- if (engine == ve->siblings[0]) { /* only preempt one sibling */
- struct i915_request *next;
-
- rcu_read_lock();
- next = READ_ONCE(ve->request);
- if (next)
- preempt = rq_prio(next) > last_prio;
- rcu_read_unlock();
- }
-
- if (preempt)
- return preempt;
- }
-
- /*
- * If the inflight context did not trigger the preemption, then maybe
- * it was the set of queued requests? Pick the highest priority in
- * the queue (the first active priolist) and see if it deserves to be
- * running instead of ELSP[0].
- *
- * The highest priority request in the queue cannot be either
- * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
- * context, its priority would not exceed ELSP[0] aka last_prio.
- */
- return queue_prio(&engine->execlists) > last_prio;
-}
-
-__maybe_unused static inline bool
-assert_priority_queue(const struct i915_request *prev,
- const struct i915_request *next)
-{
- /*
- * Without preemption, the prev may refer to the still active element
- * which we refuse to let go.
- *
- * Even with preemption, there are times when we think it is better not
- * to preempt and leave an ostensibly lower priority request in flight.
- */
- if (i915_request_is_active(prev))
- return true;
-
- return rq_prio(prev) >= rq_prio(next);
-}
-
-/*
- * The context descriptor encodes various attributes of a context,
- * including its GTT address and some flags. Because it's fairly
- * expensive to calculate, we'll just do it once and cache the result,
- * which remains valid until the context is unpinned.
- *
- * This is what a descriptor looks like, from LSB to MSB::
- *
- * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
- * bits 12-31: LRCA, GTT address of (the HWSP of) this context
- * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
- * bits 53-54: mbz, reserved for use by hardware
- * bits 55-63: group ID, currently unused and set to 0
- *
- * Starting from Gen11, the upper dword of the descriptor has a new format:
- *
- * bits 32-36: reserved
- * bits 37-47: SW context ID
- * bits 48-53: engine instance
- * bit 54: mbz, reserved for use by hardware
- * bits 55-60: SW counter
- * bits 61-63: engine class
- *
- * engine info, SW context ID and SW counter need to form a unique number
- * (Context ID) per lrc.
- */
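For reference, a worked example of packing the Gen11+ upper dword from the bit positions listed in the comment above (SW context ID at bit 37, engine instance at bit 48, SW counter at bit 55, engine class at bit 61). The helper and its name are illustrative assumptions drawn from this comment only, not the driver's GEN11_* macros.

/* Illustrative sketch only -- field positions taken from the comment above. */
#include <stdint.h>

static inline uint64_t example_gen11_ctx_id(uint64_t sw_ctx_id,	/* bits 37-47 */
					    uint64_t engine_instance,	/* bits 48-53 */
					    uint64_t sw_counter,	/* bits 55-60 */
					    uint64_t engine_class)	/* bits 61-63 */
{
	return (sw_ctx_id << 37) |
	       (engine_instance << 48) |
	       (sw_counter << 55) |
	       (engine_class << 61);
}

For example, example_gen11_ctx_id(1, 0, 0, 1) would describe SW context ID 1 on instance 0 of engine class 1, per the layout stated above.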
-static u32
-lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
-{
- u32 desc;
-
- desc = INTEL_LEGACY_32B_CONTEXT;
- if (i915_vm_is_4lvl(ce->vm))
- desc = INTEL_LEGACY_64B_CONTEXT;
- desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
-
- desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
- if (IS_GEN(engine->i915, 8))
- desc |= GEN8_CTX_L3LLC_COHERENT;
-
- return i915_ggtt_offset(ce->state) | desc;
-}
-
-static inline unsigned int dword_in_page(void *addr)
-{
- return offset_in_page(addr) / sizeof(u32);
-}
-
static void set_offsets(u32 *regs,
const u8 *data,
const struct intel_engine_cs *engine,
- bool clear)
+ bool close)
#define NOP(x) (BIT(7) | (x))
#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
#define POSTED BIT(0)
@@ -601,7 +25,7 @@ static void set_offsets(u32 *regs,
#define REG16(x) \
(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
(((x) >> 2) & 0x7f)
-#define END(total_state_size) 0, (total_state_size)
+#define END 0
{
const u32 base = engine->mmio_base;
@@ -610,8 +34,6 @@ static void set_offsets(u32 *regs,
if (*data & BIT(7)) { /* skip */
count = *data++ & ~BIT(7);
- if (clear)
- memset32(regs, MI_NOOP, count);
regs += count;
continue;
}
@@ -639,19 +61,11 @@ static void set_offsets(u32 *regs,
} while (v & BIT(7));
regs[0] = base + (offset << 2);
- if (clear)
- regs[1] = 0;
regs += 2;
} while (--count);
}
- if (clear) {
- u8 count = *++data;
-
- /* Clear past the tail for HW access */
- GEM_BUG_ON(dword_in_page(regs) > count);
- memset32(regs, MI_NOOP, count - dword_in_page(regs));
-
+ if (close) {
/* Close the batch; used mainly by live_lrc_layout() */
*regs = MI_BATCH_BUFFER_END;
if (INTEL_GEN(engine->i915) >= 10)
@@ -691,7 +105,7 @@ static const u8 gen8_xcs_offsets[] = {
REG16(0x200),
REG(0x028),
- END(80)
+ END
};
static const u8 gen9_xcs_offsets[] = {
@@ -775,7 +189,7 @@ static const u8 gen9_xcs_offsets[] = {
REG16(0x67c),
REG(0x068),
- END(176)
+ END
};
static const u8 gen12_xcs_offsets[] = {
@@ -807,7 +221,7 @@ static const u8 gen12_xcs_offsets[] = {
REG16(0x274),
REG16(0x270),
- END(80)
+ END
};
static const u8 gen8_rcs_offsets[] = {
@@ -844,7 +258,7 @@ static const u8 gen8_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END(80)
+ END
};
static const u8 gen9_rcs_offsets[] = {
@@ -928,7 +342,7 @@ static const u8 gen9_rcs_offsets[] = {
REG16(0x67c),
REG(0x68),
- END(176)
+ END
};
static const u8 gen11_rcs_offsets[] = {
@@ -969,7 +383,7 @@ static const u8 gen11_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END(80)
+ END
};
static const u8 gen12_rcs_offsets[] = {
@@ -1065,7 +479,7 @@ static const u8 gen12_rcs_offsets[] = {
REG(0x084),
NOP(1),
- END(192)
+ END
};
#undef END
@@ -1104,2284 +518,440 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
}
}
-static struct i915_request *
-__unwind_incomplete_requests(struct intel_engine_cs *engine)
-{
- struct i915_request *rq, *rn, *active = NULL;
- struct list_head *pl;
- int prio = I915_PRIORITY_INVALID;
-
- lockdep_assert_held(&engine->active.lock);
-
- list_for_each_entry_safe_reverse(rq, rn,
- &engine->active.requests,
- sched.link) {
- if (i915_request_completed(rq))
- continue; /* XXX */
-
- __i915_request_unsubmit(rq);
-
- /*
- * Push the request back into the queue for later resubmission.
- * If this request is not native to this physical engine (i.e.
- * it came from a virtual source), push it back onto the virtual
- * engine so that it can be moved across onto another physical
- * engine as load dictates.
- */
- if (likely(rq->execution_mask == engine->mask)) {
- GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
- if (rq_prio(rq) != prio) {
- prio = rq_prio(rq);
- pl = i915_sched_lookup_priolist(engine, prio);
- }
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
-
- list_move(&rq->sched.link, pl);
- set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
- /* Check in case we roll back so far we wrap [size/2] */
- if (intel_ring_direction(rq->ring,
- rq->tail,
- rq->ring->tail + 8) > 0)
- rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
-
- active = rq;
- } else {
- struct intel_engine_cs *owner = rq->context->engine;
-
- WRITE_ONCE(rq->engine, owner);
- owner->submit_request(rq);
- active = NULL;
- }
- }
-
- return active;
-}
-
-struct i915_request *
-execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
-{
- struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
-
- return __unwind_incomplete_requests(engine);
-}
-
-static inline void
-execlists_context_status_change(struct i915_request *rq, unsigned long status)
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
{
- /*
- * Only used when GVT-g is enabled now. When GVT-g is disabled,
- * the compiler should eliminate this function as dead code.
- */
- if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
- return;
-
- atomic_notifier_call_chain(&rq->engine->context_status_notifier,
- status, rq);
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x60;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x54;
+ else if (engine->class == RENDER_CLASS)
+ return 0x58;
+ else
+ return -1;
}
-static void intel_engine_context_in(struct intel_engine_cs *engine)
+static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
{
- unsigned long flags;
-
- if (atomic_add_unless(&engine->stats.active, 1, 0))
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
- if (!atomic_add_unless(&engine->stats.active, 1, 0)) {
- engine->stats.start = ktime_get();
- atomic_inc(&engine->stats.active);
- }
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x74;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x68;
+ else if (engine->class == RENDER_CLASS)
+ return 0xd8;
+ else
+ return -1;
}
-static void intel_engine_context_out(struct intel_engine_cs *engine)
+static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
{
- unsigned long flags;
-
- GEM_BUG_ON(!atomic_read(&engine->stats.active));
-
- if (atomic_add_unless(&engine->stats.active, -1, 1))
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
- if (atomic_dec_and_test(&engine->stats.active)) {
- engine->stats.total =
- ktime_add(engine->stats.total,
- ktime_sub(ktime_get(), engine->stats.start));
- }
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x12;
+ else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
+ return 0x18;
+ else
+ return -1;
}
-static void
-execlists_check_context(const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- const char *when)
+static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
{
- const struct intel_ring *ring = ce->ring;
- u32 *regs = ce->lrc_reg_state;
- bool valid = true;
int x;
- if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
- pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
- engine->name,
- regs[CTX_RING_START],
- i915_ggtt_offset(ring->vma));
- regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
- valid = false;
- }
-
- if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
- (RING_CTL_SIZE(ring->size) | RING_VALID)) {
- pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
- engine->name,
- regs[CTX_RING_CTL],
- (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
- regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
- valid = false;
- }
-
- x = lrc_ring_mi_mode(engine);
- if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
- pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
- engine->name, regs[x + 1]);
- regs[x + 1] &= ~STOP_RING;
- regs[x + 1] |= STOP_RING << 16;
- valid = false;
- }
+ x = lrc_ring_wa_bb_per_ctx(engine);
+ if (x < 0)
+ return x;
- WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
+ return x + 2;
}
-static void restore_default_state(struct intel_context *ce,
- struct intel_engine_cs *engine)
+static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
{
- u32 *regs;
+ int x;
- regs = memset(ce->lrc_reg_state, 0, engine->context_size - PAGE_SIZE);
- execlists_init_reg_state(regs, ce, engine, ce->ring, true);
+ x = lrc_ring_indirect_ptr(engine);
+ if (x < 0)
+ return x;
- ce->runtime.last = intel_context_get_runtime(ce);
+ return x + 2;
}
-static void reset_active(struct i915_request *rq,
- struct intel_engine_cs *engine)
+static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
{
- struct intel_context * const ce = rq->context;
- u32 head;
-
- /*
- * The executing context has been cancelled. We want to prevent
- * further execution along this context and propagate the error on
- * to anything depending on its results.
- *
- * In __i915_request_submit(), we apply the -EIO and remove the
- * requests' payloads for any banned requests. But first, we must
- * rewind the context back to the start of the incomplete request so
- * that we do not jump back into the middle of the batch.
- *
- * We preserve the breadcrumbs and semaphores of the incomplete
- * requests so that inter-timeline dependencies (i.e other timelines)
- * remain correctly ordered. And we defer to __i915_request_submit()
- * so that all asynchronous waits are correctly handled.
- */
- ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n",
- rq->fence.context, rq->fence.seqno);
+ if (engine->class != RENDER_CLASS)
+ return -1;
- /* On resubmission of the active request, payload will be scrubbed */
- if (i915_request_completed(rq))
- head = rq->tail;
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0xb6;
+ else if (INTEL_GEN(engine->i915) >= 11)
+ return 0xaa;
else
- head = active_request(ce->timeline, rq)->head;
- head = intel_ring_wrap(ce->ring, head);
-
- /* Scrub the context image to prevent replaying the previous batch */
- restore_default_state(ce, engine);
- __execlists_update_reg_state(ce, engine, head);
-
- /* We've switched away, so this should be a no-op, but intent matters */
- ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
-}
-
-static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
- ce->runtime.num_underflow += dt < 0;
- ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
-#endif
+ return -1;
}
-static void intel_context_update_runtime(struct intel_context *ce)
+static u32
+lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
{
- u32 old;
- s32 dt;
-
- if (intel_context_is_barrier(ce))
- return;
-
- old = ce->runtime.last;
- ce->runtime.last = intel_context_get_runtime(ce);
- dt = ce->runtime.last - old;
-
- if (unlikely(dt <= 0)) {
- CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
- old, ce->runtime.last, dt);
- st_update_runtime_underflow(ce, dt);
- return;
+ switch (INTEL_GEN(engine->i915)) {
+ default:
+ MISSING_CASE(INTEL_GEN(engine->i915));
+ fallthrough;
+ case 12:
+ return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 11:
+ return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 10:
+ return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 9:
+ return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 8:
+ return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
}
-
- ewma_runtime_add(&ce->runtime.avg, dt);
- ce->runtime.total += dt;
}
-static inline struct intel_engine_cs *
-__execlists_schedule_in(struct i915_request *rq)
+static void
+lrc_setup_indirect_ctx(u32 *regs,
+ const struct intel_engine_cs *engine,
+ u32 ctx_bb_ggtt_addr,
+ u32 size)
{
- struct intel_engine_cs * const engine = rq->engine;
- struct intel_context * const ce = rq->context;
-
- intel_context_get(ce);
-
- if (unlikely(intel_context_is_banned(ce)))
- reset_active(rq, engine);
-
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- execlists_check_context(ce, engine, "before");
-
- if (ce->tag) {
- /* Use a fixed tag for OA and friends */
- GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
- ce->lrc.ccid = ce->tag;
- } else {
- /* We don't need a strict matching tag, just different values */
- unsigned int tag = ffs(READ_ONCE(engine->context_tag));
-
- GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
- clear_bit(tag - 1, &engine->context_tag);
- ce->lrc.ccid = tag << (GEN11_SW_CTX_ID_SHIFT - 32);
-
- BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
- }
-
- ce->lrc.ccid |= engine->execlists.ccid;
-
- __intel_gt_pm_get(engine->gt);
- if (engine->fw_domain && !atomic_fetch_inc(&engine->fw_active))
- intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
- intel_engine_context_in(engine);
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
+ GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
+ regs[lrc_ring_indirect_ptr(engine) + 1] =
+ ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
- return engine;
+ GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
+ regs[lrc_ring_indirect_offset(engine) + 1] =
+ lrc_ring_indirect_offset_default(engine) << 6;
}
-static inline struct i915_request *
-execlists_schedule_in(struct i915_request *rq, int idx)
+static void init_common_regs(u32 * const regs,
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool inhibit)
{
- struct intel_context * const ce = rq->context;
- struct intel_engine_cs *old;
-
- GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
- trace_i915_request_in(rq, idx);
-
- old = READ_ONCE(ce->inflight);
- do {
- if (!old) {
- WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq));
- break;
- }
- } while (!try_cmpxchg(&ce->inflight, &old, ptr_inc(old)));
-
- GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
- return i915_request_get(rq);
-}
+ u32 ctl;
-static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- struct i915_request *next = READ_ONCE(ve->request);
+ ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ if (inhibit)
+ ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
+ if (INTEL_GEN(engine->i915) < 11)
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
+ CTX_CTRL_RS_CTX_ENABLE);
+ regs[CTX_CONTEXT_CONTROL] = ctl;
- if (next == rq || (next && next->execution_mask & ~rq->execution_mask))
- tasklet_hi_schedule(&ve->base.execlists.tasklet);
+ regs[CTX_TIMESTAMP] = ce->runtime.last;
}
-static inline void
-__execlists_schedule_out(struct i915_request *rq,
- struct intel_engine_cs * const engine,
- unsigned int ccid)
+static void init_wa_bb_regs(u32 * const regs,
+ const struct intel_engine_cs *engine)
{
- struct intel_context * const ce = rq->context;
-
- /*
- * NB process_csb() is not under the engine->active.lock and hence
- * schedule_out can race with schedule_in, meaning that we should
- * refrain from doing non-trivial work here.
- */
+ const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- execlists_check_context(ce, engine, "after");
+ if (wa_ctx->per_ctx.size) {
+ const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
- /*
- * If we have just completed this context, the engine may now be
- * idle and we want to re-enter powersaving.
- */
- if (list_is_last_rcu(&rq->link, &ce->timeline->requests) &&
- i915_request_completed(rq))
- intel_engine_add_retire(engine, ce->timeline);
-
- ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
- ccid &= GEN12_MAX_CONTEXT_HW_ID;
- if (ccid < BITS_PER_LONG) {
- GEM_BUG_ON(ccid == 0);
- GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
- set_bit(ccid - 1, &engine->context_tag);
+ GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
+ regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
+ (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
}
- intel_context_update_runtime(ce);
- intel_engine_context_out(engine);
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
- if (engine->fw_domain && !atomic_dec_return(&engine->fw_active))
- intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
- intel_gt_pm_put_async(engine->gt);
-
- /*
- * If this is part of a virtual engine, its next request may
- * have been blocked waiting for access to the active context.
- * We have to kick all the siblings again in case we need to
- * switch (e.g. the next request is not runnable on this
- * engine). Hopefully, we will already have submitted the next
- * request before the tasklet runs and do not need to rebuild
- * each virtual tree and kick everyone again.
- */
- if (ce->engine != engine)
- kick_siblings(rq, ce);
-
- intel_context_put(ce);
-}
-
-static inline void
-execlists_schedule_out(struct i915_request *rq)
-{
- struct intel_context * const ce = rq->context;
- struct intel_engine_cs *cur, *old;
- u32 ccid;
-
- trace_i915_request_out(rq);
-
- ccid = rq->context->lrc.ccid;
- old = READ_ONCE(ce->inflight);
- do
- cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL;
- while (!try_cmpxchg(&ce->inflight, &old, cur));
- if (!cur)
- __execlists_schedule_out(rq, old, ccid);
-
- i915_request_put(rq);
-}
-
-static u64 execlists_update_context(struct i915_request *rq)
-{
- struct intel_context *ce = rq->context;
- u64 desc = ce->lrc.desc;
- u32 tail, prev;
-
- /*
- * WaIdleLiteRestore:bdw,skl
- *
- * We should never submit the context with the same RING_TAIL twice
- * just in case we submit an empty ring, which confuses the HW.
- *
- * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
- * the normal request to be able to always advance the RING_TAIL on
- * subsequent resubmissions (for lite restore). Should that fail us,
- * and we try and submit the same tail again, force the context
- * reload.
- *
- * If we need to return to a preempted context, we need to skip the
- * lite-restore and force it to reload the RING_TAIL. Otherwise, the
- * HW has a tendency to ignore us rewinding the TAIL to the end of
- * an earlier request.
- */
- GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
- prev = rq->ring->tail;
- tail = intel_ring_set_tail(rq->ring, rq->tail);
- if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
- desc |= CTX_DESC_FORCE_RESTORE;
- ce->lrc_reg_state[CTX_RING_TAIL] = tail;
- rq->tail = rq->wa_tail;
-
- /*
- * Make sure the context image is complete before we submit it to HW.
- *
- * Ostensibly, writes (including the WCB) should be flushed prior to
- * an uncached write such as our mmio register access, but the empirical
- * evidence (esp. on Braswell) suggests that the WC write into memory
- * may not be visible to the HW prior to the completion of the UC
- * register write and that we may begin execution from the context
- * before its image is complete leading to invalid PD chasing.
- */
- wmb();
-
- ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
- return desc;
-}
-
-static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
-{
- if (execlists->ctrl_reg) {
- writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
- writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
- } else {
- writel(upper_32_bits(desc), execlists->submit_reg);
- writel(lower_32_bits(desc), execlists->submit_reg);
+ if (wa_ctx->indirect_ctx.size) {
+ lrc_setup_indirect_ctx(regs, engine,
+ i915_ggtt_offset(wa_ctx->vma) +
+ wa_ctx->indirect_ctx.offset,
+ wa_ctx->indirect_ctx.size);
}
}
-static __maybe_unused char *
-dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
+static void init_ppgtt_regs(u32 *regs, const struct i915_ppgtt *ppgtt)
{
- if (!rq)
- return "";
-
- snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
- prefix,
- rq->context->lrc.ccid,
- rq->fence.context, rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- rq_prio(rq));
-
- return buf;
-}
-
-static __maybe_unused void
-trace_ports(const struct intel_engine_execlists *execlists,
- const char *msg,
- struct i915_request * const *ports)
-{
- const struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
- char __maybe_unused p0[40], p1[40];
-
- if (!ports[0])
- return;
-
- ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
- dump_port(p0, sizeof(p0), "", ports[0]),
- dump_port(p1, sizeof(p1), ", ", ports[1]));
-}
-
-static inline bool
-reset_in_progress(const struct intel_engine_execlists *execlists)
-{
- return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
-}
-
-static __maybe_unused bool
-assert_pending_valid(const struct intel_engine_execlists *execlists,
- const char *msg)
-{
- struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
- struct i915_request * const *port, *rq;
- struct intel_context *ce = NULL;
- bool sentinel = false;
- u32 ccid = -1;
-
- trace_ports(execlists, msg, execlists->pending);
-
- /* We may be messing around with the lists during reset, lalala */
- if (reset_in_progress(execlists))
- return true;
-
- if (!execlists->pending[0]) {
- GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
- engine->name);
- return false;
- }
-
- if (execlists->pending[execlists_num_ports(execlists)]) {
- GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
- engine->name, execlists_num_ports(execlists));
- return false;
- }
-
- for (port = execlists->pending; (rq = *port); port++) {
- unsigned long flags;
- bool ok = true;
-
- GEM_BUG_ON(!kref_read(&rq->fence.refcount));
- GEM_BUG_ON(!i915_request_is_active(rq));
-
- if (ce == rq->context) {
- GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- return false;
- }
- ce = rq->context;
-
- if (ccid == ce->lrc.ccid) {
- GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
- engine->name,
- ccid, ce->timeline->fence_context,
- port - execlists->pending);
- return false;
- }
- ccid = ce->lrc.ccid;
-
- /*
- * Sentinels are supposed to be the last request so they flush
- * the current execution off the HW. Check that they are the only
- * request in the pending submission.
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ /* 64b PPGTT (48bit canonical)
+ * PDP0_DESCRIPTOR contains the base address to PML4 and
+ * other PDP Descriptors are ignored.
*/
- if (sentinel) {
- GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- return false;
- }
- sentinel = i915_request_has_sentinel(rq);
-
- /* Hold tightly onto the lock to prevent concurrent retires! */
- if (!spin_trylock_irqsave(&rq->lock, flags))
- continue;
-
- if (i915_request_completed(rq))
- goto unlock;
-
- if (i915_active_is_idle(&ce->active) &&
- !intel_context_is_barrier(ce)) {
- GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- ok = false;
- goto unlock;
- }
-
- if (!i915_vma_is_pinned(ce->state)) {
- GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- ok = false;
- goto unlock;
- }
-
- if (!i915_vma_is_pinned(ce->ring->vma)) {
- GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- ok = false;
- goto unlock;
- }
-
-unlock:
- spin_unlock_irqrestore(&rq->lock, flags);
- if (!ok)
- return false;
- }
-
- return ce;
-}
-
-static void execlists_submit_ports(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists *execlists = &engine->execlists;
- unsigned int n;
-
- GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
-
- /*
- * We can skip acquiring intel_runtime_pm_get() here as it was taken
- * on our behalf by the request (see i915_gem_mark_busy()) and it will
- * not be relinquished until the device is idle (see
- * i915_gem_idle_work_handler()). As a precaution, we make sure
- * that all ELSP are drained i.e. we have processed the CSB,
- * before allowing ourselves to idle and calling intel_runtime_pm_put().
- */
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
- /*
- * ELSQ note: the submit queue is not cleared after being submitted
- * to the HW so we need to make sure we always clean it up. This is
- * currently ensured by the fact that we always write the same number
- * of elsq entries, keep this in mind before changing the loop below.
- */
- for (n = execlists_num_ports(execlists); n--; ) {
- struct i915_request *rq = execlists->pending[n];
-
- write_desc(execlists,
- rq ? execlists_update_context(rq) : 0,
- n);
- }
-
- /* we need to manually load the submit queue */
- if (execlists->ctrl_reg)
- writel(EL_CTRL_LOAD, execlists->ctrl_reg);
-}
-
-static bool ctx_single_port_submission(const struct intel_context *ce)
-{
- return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
- intel_context_force_single_submission(ce));
-}
-
-static bool can_merge_ctx(const struct intel_context *prev,
- const struct intel_context *next)
-{
- if (prev != next)
- return false;
-
- if (ctx_single_port_submission(prev))
- return false;
-
- return true;
-}
-
-static unsigned long i915_request_flags(const struct i915_request *rq)
-{
- return READ_ONCE(rq->fence.flags);
-}
-
-static bool can_merge_rq(const struct i915_request *prev,
- const struct i915_request *next)
-{
- GEM_BUG_ON(prev == next);
- GEM_BUG_ON(!assert_priority_queue(prev, next));
-
- /*
- * We do not submit known completed requests. Therefore if the next
- * request is already completed, we can pretend to merge it in
- * with the previous context (and we will skip updating the ELSP
- * and tracking). Thus hopefully keeping the ELSP full with active
- * contexts, despite the best efforts of preempt-to-busy to confuse
- * us.
- */
- if (i915_request_completed(next))
- return true;
-
- if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
- (BIT(I915_FENCE_FLAG_NOPREEMPT) |
- BIT(I915_FENCE_FLAG_SENTINEL))))
- return false;
-
- if (!can_merge_ctx(prev->context, next->context))
- return false;
-
- GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
- return true;
-}
-
-static void virtual_update_register_offsets(u32 *regs,
- struct intel_engine_cs *engine)
-{
- set_offsets(regs, reg_offsets(engine), engine, false);
-}
-
-static bool virtual_matches(const struct virtual_engine *ve,
- const struct i915_request *rq,
- const struct intel_engine_cs *engine)
-{
- const struct intel_engine_cs *inflight;
-
- if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
- return false;
-
- /*
- * We track when the HW has completed saving the context image
- * (i.e. when we have seen the final CS event switching out of
- * the context) and must not overwrite the context image before
- * then. This restricts us to only using the active engine
- * while the previous virtualized request is inflight (so
- * we reuse the register offsets). This is a very small
- * hysteresis on the greedy selection algorithm.
- */
- inflight = intel_context_inflight(&ve->context);
- if (inflight && inflight != engine)
- return false;
-
- return true;
-}
-
-static void virtual_xfer_context(struct virtual_engine *ve,
- struct intel_engine_cs *engine)
-{
- unsigned int n;
-
- if (likely(engine == ve->siblings[0]))
- return;
-
- GEM_BUG_ON(READ_ONCE(ve->context.inflight));
- if (!intel_engine_has_relative_mmio(engine))
- virtual_update_register_offsets(ve->context.lrc_reg_state,
- engine);
-
- /*
- * Move the bound engine to the top of the list for
- * future execution. We then kick this tasklet first
- * before checking others, so that we preferentially
- * reuse this set of bound registers.
- */
- for (n = 1; n < ve->num_siblings; n++) {
- if (ve->siblings[n] == engine) {
- swap(ve->siblings[n], ve->siblings[0]);
- break;
- }
+ ASSIGN_CTX_PML4(ppgtt, regs);
+ } else {
+ ASSIGN_CTX_PDP(ppgtt, regs, 3);
+ ASSIGN_CTX_PDP(ppgtt, regs, 2);
+ ASSIGN_CTX_PDP(ppgtt, regs, 1);
+ ASSIGN_CTX_PDP(ppgtt, regs, 0);
}
}
-#define for_each_waiter(p__, rq__) \
- list_for_each_entry_lockless(p__, \
- &(rq__)->sched.waiters_list, \
- wait_link)
-
-#define for_each_signaler(p__, rq__) \
- list_for_each_entry_rcu(p__, \
- &(rq__)->sched.signalers_list, \
- signal_link)
-
-static void defer_request(struct i915_request *rq, struct list_head * const pl)
-{
- LIST_HEAD(list);
-
- /*
- * We want to move the interrupted request to the back of
- * the round-robin list (i.e. its priority level), but
- * in doing so, we must then move all requests that were in
- * flight and were waiting for the interrupted request to
- * be run after it again.
- */
- do {
- struct i915_dependency *p;
-
- GEM_BUG_ON(i915_request_is_active(rq));
- list_move_tail(&rq->sched.link, pl);
-
- for_each_waiter(p, rq) {
- struct i915_request *w =
- container_of(p->waiter, typeof(*w), sched);
-
- if (p->flags & I915_DEPENDENCY_WEAK)
- continue;
-
- /* Leave semaphores spinning on the other engines */
- if (w->engine != rq->engine)
- continue;
-
- /* No waiter should start before its signaler */
- GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
- i915_request_started(w) &&
- !i915_request_completed(rq));
-
- GEM_BUG_ON(i915_request_is_active(w));
- if (!i915_request_is_ready(w))
- continue;
-
- if (rq_prio(w) < rq_prio(rq))
- continue;
-
- GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
- list_move_tail(&w->sched.link, &list);
- }
-
- rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
- } while (rq);
-}
-
-static void defer_active(struct intel_engine_cs *engine)
+static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
{
- struct i915_request *rq;
-
- rq = __unwind_incomplete_requests(engine);
- if (!rq)
- return;
-
- defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+ if (i915_is_ggtt(vm))
+ return i915_vm_to_ggtt(vm)->alias;
+ else
+ return i915_vm_to_ppgtt(vm);
}
-static bool
-need_timeslice(const struct intel_engine_cs *engine,
- const struct i915_request *rq,
- const struct rb_node *rb)
+static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
{
- int hint;
-
- if (!intel_engine_has_timeslices(engine))
- return false;
-
- hint = engine->execlists.queue_priority_hint;
-
- if (rb) {
- const struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- const struct intel_engine_cs *inflight =
- intel_context_inflight(&ve->context);
-
- if (!inflight || inflight == engine) {
- struct i915_request *next;
+ int x;
- rcu_read_lock();
- next = READ_ONCE(ve->request);
- if (next)
- hint = max(hint, rq_prio(next));
- rcu_read_unlock();
- }
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1) {
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
}
-
- if (!list_is_last(&rq->sched.link, &engine->active.requests))
- hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
-
- GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE);
- return hint >= effective_prio(rq);
}
-static bool
-timeslice_yield(const struct intel_engine_execlists *el,
- const struct i915_request *rq)
+static void __lrc_init_regs(u32 *regs,
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool inhibit)
{
/*
- * Once bitten, forever smitten!
+ * A context is actually a big batch buffer with several
+ * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
+ * values we are setting here are only for the first context restore:
+ * on a subsequent save, the GPU will recreate this batchbuffer with new
+ * values (including all the missing MI_LOAD_REGISTER_IMM commands that
+ * we are not initializing here).
*
- * If the active context ever busy-waited on a semaphore,
- * it will be treated as a hog until the end of its timeslice (i.e.
- * until it is scheduled out and replaced by a new submission,
- * possibly even its own lite-restore). The HW only sends an interrupt
- * on the first miss, and we do not know if that semaphore has been
- * signaled, or even if it is now stuck on another semaphore. Play
- * safe, yield if it might be stuck -- it will be given a fresh
- * timeslice in the near future.
+ * Must keep consistent with virtual_update_register_offsets().
*/
- return rq->context->lrc.ccid == READ_ONCE(el->yield);
-}
-
-static bool
-timeslice_expired(const struct intel_engine_execlists *el,
- const struct i915_request *rq)
-{
- return timer_expired(&el->timer) || timeslice_yield(el, rq);
-}
-
-static int
-switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
-{
- if (list_is_last(&rq->sched.link, &engine->active.requests))
- return engine->execlists.queue_priority_hint;
-
- return rq_prio(list_next_entry(rq, sched.link));
-}
-
-static inline unsigned long
-timeslice(const struct intel_engine_cs *engine)
-{
- return READ_ONCE(engine->props.timeslice_duration_ms);
-}
-static unsigned long active_timeslice(const struct intel_engine_cs *engine)
-{
- const struct intel_engine_execlists *execlists = &engine->execlists;
- const struct i915_request *rq = *execlists->active;
-
- if (!rq || i915_request_completed(rq))
- return 0;
-
- if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
- return 0;
-
- return timeslice(engine);
-}
+ if (inhibit)
+ memset(regs, 0, PAGE_SIZE);
-static void set_timeslice(struct intel_engine_cs *engine)
-{
- unsigned long duration;
+ set_offsets(regs, reg_offsets(engine), engine, inhibit);
- if (!intel_engine_has_timeslices(engine))
- return;
+ init_common_regs(regs, ce, engine, inhibit);
+ init_ppgtt_regs(regs, vm_alias(ce->vm));
- duration = active_timeslice(engine);
- ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration);
+ init_wa_bb_regs(regs, engine);
- set_timer_ms(&engine->execlists.timer, duration);
+ __reset_stop_ring(regs, engine);
}
-static void start_timeslice(struct intel_engine_cs *engine, int prio)
+void lrc_init_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool inhibit)
{
- struct intel_engine_execlists *execlists = &engine->execlists;
- unsigned long duration;
-
- if (!intel_engine_has_timeslices(engine))
- return;
-
- WRITE_ONCE(execlists->switch_priority_hint, prio);
- if (prio == INT_MIN)
- return;
-
- if (timer_pending(&execlists->timer))
- return;
-
- duration = timeslice(engine);
- ENGINE_TRACE(engine,
- "start timeslicing, prio:%d, interval:%lu",
- prio, duration);
-
- set_timer_ms(&execlists->timer, duration);
+ __lrc_init_regs(ce->lrc_reg_state, ce, engine, inhibit);
}
-static void record_preemption(struct intel_engine_execlists *execlists)
+void lrc_reset_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
{
- (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+ __reset_stop_ring(ce->lrc_reg_state, engine);
}
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
- const struct i915_request *rq)
+static void
+set_redzone(void *vaddr, const struct intel_engine_cs *engine)
{
- if (!rq)
- return 0;
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
- /* Force a fast reset for terminated contexts (ignoring sysfs!) */
- if (unlikely(intel_context_is_banned(rq->context)))
- return 1;
+ vaddr += engine->context_size;
- return READ_ONCE(engine->props.preempt_timeout_ms);
+ memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
}
-static void set_preempt_timeout(struct intel_engine_cs *engine,
- const struct i915_request *rq)
+static void
+check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
{
- if (!intel_engine_has_preempt_reset(engine))
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
- set_timer_ms(&engine->execlists.preempt,
- active_preempt_timeout(engine, rq));
-}
-
-static inline void clear_ports(struct i915_request **ports, int count)
-{
- memset_p((void **)ports, NULL, count);
-}
+ vaddr += engine->context_size;
-static inline void
-copy_ports(struct i915_request **dst, struct i915_request **src, int count)
-{
- /* A memcpy_p() would be very useful here! */
- while (count--)
- WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
+ if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
+ drm_err_once(&engine->i915->drm,
+ "%s context redzone overwritten!\n",
+ engine->name);
}
-static void execlists_dequeue(struct intel_engine_cs *engine)
+void lrc_init_state(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *state)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request **port = execlists->pending;
- struct i915_request ** const last_port = port + execlists->port_mask;
- struct i915_request * const *active;
- struct i915_request *last;
- struct rb_node *rb;
- bool submit = false;
-
- /*
- * Hardware submission is through 2 ports. Conceptually each port
- * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
- * static for a context, and unique to each, so we only execute
- * requests belonging to a single context from each ring. RING_HEAD
- * is maintained by the CS in the context image, it marks the place
- * where it got up to last time, and through RING_TAIL we tell the CS
- * where we want to execute up to this time.
- *
- * In this list the requests are in order of execution. Consecutive
- * requests from the same context are adjacent in the ringbuffer. We
- * can combine these requests into a single RING_TAIL update:
- *
- * RING_HEAD...req1...req2
- * ^- RING_TAIL
- * since to execute req2 the CS must first execute req1.
- *
- * Our goal then is to point each port to the end of a consecutive
- * sequence of requests, as that is the most optimal (fewest wake-ups
- * and context switches) submission.
- */
-
- for (rb = rb_first_cached(&execlists->virtual); rb; ) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- struct i915_request *rq = READ_ONCE(ve->request);
-
- if (!rq) { /* lazily cleanup after another engine handled rq */
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
- rb = rb_first_cached(&execlists->virtual);
- continue;
- }
-
- if (!virtual_matches(ve, rq, engine)) {
- rb = rb_next(rb);
- continue;
- }
-
- break;
- }
-
- /*
- * If the queue is higher priority than the last
- * request in the currently active context, submit afresh.
- * We will resubmit again afterwards in case we need to split
- * the active context to interject the preemption request,
- * i.e. we will retrigger preemption following the ack in case
- * of trouble.
- */
- active = READ_ONCE(execlists->active);
-
- /*
- * In theory we can skip over completed contexts that have not
- * yet been processed by events (as those events are in flight):
- *
- * while ((last = *active) && i915_request_completed(last))
- * active++;
- *
- * However, the GPU cannot handle this as it will ultimately
- * find itself trying to jump back into a context it has just
- * completed and barf.
- */
-
- if ((last = *active)) {
- if (need_preempt(engine, last, rb)) {
- if (i915_request_completed(last)) {
- tasklet_hi_schedule(&execlists->tasklet);
- return;
- }
-
- ENGINE_TRACE(engine,
- "preempting last=%llx:%lld, prio=%d, hint=%d\n",
- last->fence.context,
- last->fence.seqno,
- last->sched.attr.priority,
- execlists->queue_priority_hint);
- record_preemption(execlists);
-
- /*
- * Don't let the RING_HEAD advance past the breadcrumb
- * as we unwind (and until we resubmit) so that we do
- * not accidentally tell it to go backwards.
- */
- ring_set_paused(engine, 1);
-
- /*
- * Note that we have not stopped the GPU at this point,
- * so we are unwinding the incomplete requests as they
- * remain inflight and so by the time we do complete
- * the preemption, some of the unwound requests may
- * complete!
- */
- __unwind_incomplete_requests(engine);
-
- last = NULL;
- } else if (need_timeslice(engine, last, rb) &&
- timeslice_expired(execlists, last)) {
- if (i915_request_completed(last)) {
- tasklet_hi_schedule(&execlists->tasklet);
- return;
- }
-
- ENGINE_TRACE(engine,
- "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
- last->fence.context,
- last->fence.seqno,
- last->sched.attr.priority,
- execlists->queue_priority_hint,
- yesno(timeslice_yield(execlists, last)));
-
- ring_set_paused(engine, 1);
- defer_active(engine);
-
- /*
- * Unlike for preemption, if we rewind and continue
- * executing the same context as previously active,
- * the order of execution will remain the same and
- * the tail will only advance. We do not need to
- * force a full context restore, as a lite-restore
- * is sufficient to resample the monotonic TAIL.
- *
- * If we switch to any other context, similarly we
- * will not rewind TAIL of current context, and
- * normal save/restore will preserve state and allow
- * us to later continue executing the same request.
- */
- last = NULL;
- } else {
- /*
- * Otherwise if we already have a request pending
- * for execution after the current one, we can
- * just wait until the next CS event before
- * queuing more. In either case we will force a
- * lite-restore preemption event, but if we wait
- * we hopefully coalesce several updates into a single
- * submission.
- */
- if (!list_is_last(&last->sched.link,
- &engine->active.requests)) {
- /*
- * Even if ELSP[1] is occupied and not worthy
- * of timeslices, our queue might be.
- */
- start_timeslice(engine, queue_prio(execlists));
- return;
- }
- }
- }
-
- while (rb) { /* XXX virtual is always taking precedence */
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- struct i915_request *rq;
-
- spin_lock(&ve->base.active.lock);
-
- rq = ve->request;
- if (unlikely(!rq)) { /* lost the race to a sibling */
- spin_unlock(&ve->base.active.lock);
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
- rb = rb_first_cached(&execlists->virtual);
- continue;
- }
+ bool inhibit = true;
- GEM_BUG_ON(rq != ve->request);
- GEM_BUG_ON(rq->engine != &ve->base);
- GEM_BUG_ON(rq->context != &ve->context);
-
- if (rq_prio(rq) >= queue_prio(execlists)) {
- if (!virtual_matches(ve, rq, engine)) {
- spin_unlock(&ve->base.active.lock);
- rb = rb_next(rb);
- continue;
- }
-
- if (last && !can_merge_rq(last, rq)) {
- spin_unlock(&ve->base.active.lock);
- start_timeslice(engine, rq_prio(rq));
- return; /* leave this for another sibling */
- }
-
- ENGINE_TRACE(engine,
- "virtual rq=%llx:%lld%s, new engine? %s\n",
- rq->fence.context,
- rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- yesno(engine != ve->siblings[0]));
-
- WRITE_ONCE(ve->request, NULL);
- WRITE_ONCE(ve->base.execlists.queue_priority_hint,
- INT_MIN);
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
-
- GEM_BUG_ON(!(rq->execution_mask & engine->mask));
- WRITE_ONCE(rq->engine, engine);
-
- if (__i915_request_submit(rq)) {
- /*
- * Only after we confirm that we will submit
- * this request (i.e. it has not already
- * completed), do we want to update the context.
- *
- * This serves two purposes. It avoids
- * unnecessary work if we are resubmitting an
- * already completed request after timeslicing.
- * But more importantly, it prevents us altering
- * ve->siblings[] on an idle context, where
- * we may be using ve->siblings[] in
- * virtual_context_enter / virtual_context_exit.
- */
- virtual_xfer_context(ve, engine);
- GEM_BUG_ON(ve->siblings[0] != engine);
-
- submit = true;
- last = rq;
- }
- i915_request_put(rq);
-
- /*
- * Hmm, we have a bunch of virtual engine requests,
- * but the first one was already completed (thanks
- * preempt-to-busy!). Keep looking at the veng queue
- * until we have no more relevant requests (i.e.
- * the normal submit queue has higher priority).
- */
- if (!submit) {
- spin_unlock(&ve->base.active.lock);
- rb = rb_first_cached(&execlists->virtual);
- continue;
- }
- }
+ set_redzone(state, engine);
- spin_unlock(&ve->base.active.lock);
- break;
+ if (engine->default_state) {
+ shmem_read(engine->default_state, 0,
+ state, engine->context_size);
+ __set_bit(CONTEXT_VALID_BIT, &ce->flags);
+ inhibit = false;
}
- while ((rb = rb_first_cached(&execlists->queue))) {
- struct i915_priolist *p = to_priolist(rb);
- struct i915_request *rq, *rn;
- int i;
-
- priolist_for_each_request_consume(rq, rn, p, i) {
- bool merge = true;
-
- /*
- * Can we combine this request with the current port?
- * It has to be the same context/ringbuffer and not
- * have any exceptions (e.g. GVT saying never to
- * combine contexts).
- *
- * If we can combine the requests, we can execute both
- * by updating the RING_TAIL to point to the end of the
- * second request, and so we never need to tell the
- * hardware about the first.
- */
- if (last && !can_merge_rq(last, rq)) {
- /*
- * If we are on the second port and cannot
- * combine this request with the last, then we
- * are done.
- */
- if (port == last_port)
- goto done;
-
- /*
- * We must not populate both ELSP[] with the
- * same LRCA, i.e. we must submit 2 different
- * contexts if we submit 2 ELSP.
- */
- if (last->context == rq->context)
- goto done;
-
- if (i915_request_has_sentinel(last))
- goto done;
-
- /*
- * If GVT overrides us we only ever submit
- * port[0], leaving port[1] empty. Note that we
- * also have to be careful that we don't queue
- * the same context (even though a different
- * request) to the second port.
- */
- if (ctx_single_port_submission(last->context) ||
- ctx_single_port_submission(rq->context))
- goto done;
-
- merge = false;
- }
-
- if (__i915_request_submit(rq)) {
- if (!merge) {
- *port = execlists_schedule_in(last, port - execlists->pending);
- port++;
- last = NULL;
- }
-
- GEM_BUG_ON(last &&
- !can_merge_ctx(last->context,
- rq->context));
- GEM_BUG_ON(last &&
- i915_seqno_passed(last->fence.seqno,
- rq->fence.seqno));
-
- submit = true;
- last = rq;
- }
- }
-
- rb_erase_cached(&p->node, &execlists->queue);
- i915_priolist_free(p);
- }
+ /* Clear the ppHWSP (inc. per-context counters) */
+ memset(state, 0, PAGE_SIZE);
-done:
/*
- * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
- *
- * We choose the priority hint such that if we add a request of greater
- * priority than this, we kick the submission tasklet to decide on
- * the right order of submitting the requests to hardware. We must
- * also be prepared to reorder requests as they are in-flight on the
- * HW. We derive the priority hint then as the first "hole" in
- * the HW submission ports and if there are no available slots,
- * the priority of the lowest executing request, i.e. last.
- *
- * When we do receive a higher priority request ready to run from the
- * user, see queue_request(), the priority hint is bumped to that
- * request triggering preemption on the next dequeue (or subsequent
- * interrupt for secondary ports).
+ * The second page of the context object contains some registers which
+ * must be set up prior to the first execution.
*/
- execlists->queue_priority_hint = queue_prio(execlists);
-
- if (submit) {
- *port = execlists_schedule_in(last, port - execlists->pending);
- execlists->switch_priority_hint =
- switch_prio(engine, *execlists->pending);
-
- /*
- * Skip if we ended up with exactly the same set of requests,
- * e.g. trying to timeslice a pair of ordered contexts
- */
- if (!memcmp(active, execlists->pending,
- (port - execlists->pending + 1) * sizeof(*port))) {
- do
- execlists_schedule_out(fetch_and_zero(port));
- while (port-- != execlists->pending);
-
- goto skip_submit;
- }
- clear_ports(port + 1, last_port - port);
-
- WRITE_ONCE(execlists->yield, -1);
- set_preempt_timeout(engine, *active);
- execlists_submit_ports(engine);
- } else {
- start_timeslice(engine, execlists->queue_priority_hint);
-skip_submit:
- ring_set_paused(engine, 0);
- }
+ __lrc_init_regs(state + LRC_STATE_OFFSET, ce, engine, inhibit);
}
-static void
-cancel_port_requests(struct intel_engine_execlists * const execlists)
+static struct i915_vma *
+__lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
{
- struct i915_request * const *port;
-
- for (port = execlists->pending; *port; port++)
- execlists_schedule_out(*port);
- clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
-
- /* Mark the end of active before we overwrite *active */
- for (port = xchg(&execlists->active, execlists->pending); *port; port++)
- execlists_schedule_out(*port);
- clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
-
- smp_wmb(); /* complete the seqlock for execlists_active() */
- WRITE_ONCE(execlists->active, execlists->inflight);
-}
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 context_size;
-static inline void
-invalidate_csb_entries(const u64 *first, const u64 *last)
-{
- clflush((void *)first);
- clflush((void *)last);
-}
+ context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
-/*
- * Starting with Gen12, the status has a new format:
- *
- * bit 0: switched to new queue
- * bit 1: reserved
- * bit 2: semaphore wait mode (poll or signal), only valid when
- * switch detail is set to "wait on semaphore"
- * bits 3-5: engine class
- * bits 6-11: engine instance
- * bits 12-14: reserved
- * bits 15-25: sw context id of the lrc the GT switched to
- * bits 26-31: sw counter of the lrc the GT switched to
- * bits 32-35: context switch detail
- * - 0: ctx complete
- * - 1: wait on sync flip
- * - 2: wait on vblank
- * - 3: wait on scanline
- * - 4: wait on semaphore
- * - 5: context preempted (not on SEMAPHORE_WAIT or
- * WAIT_FOR_EVENT)
- * bit 36: reserved
- * bits 37-43: wait detail (for switch detail 1 to 4)
- * bits 44-46: reserved
- * bits 47-57: sw context id of the lrc the GT switched away from
- * bits 58-63: sw counter of the lrc the GT switched away from
- */
-static inline bool gen12_csb_parse(const u64 csb)
-{
- bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb));
- bool new_queue =
- lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ context_size += I915_GTT_PAGE_SIZE; /* for redzone */
- /*
- * The context switch detail is not guaranteed to be 5 when a preemption
- * occurs, so we can't just check for that. The check below works for
- * all the cases we care about, including preemptions of WAIT
- * instructions and lite-restore. Preempt-to-idle via the CTRL register
- * would require some extra handling, but we don't support that.
- */
- if (!ctx_away_valid || new_queue) {
- GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb)));
- return true;
+ if (INTEL_GEN(engine->i915) == 12) {
+ ce->wa_bb_page = context_size / PAGE_SIZE;
+ context_size += PAGE_SIZE;
}
- /*
- * switch detail = 5 is covered by the case above and we do not expect a
- * context switch on an unsuccessful wait instruction since we always
- * use polling mode.
- */
- GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb)));
- return false;
-}
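For reference, a minimal standalone sketch of decoding the Gen12 CSB fields using exactly the bit positions documented in the comment above; the struct, function and values below are invented for illustration and are not part of the kernel or of this patch.

/*
 * Illustrative only: decode a Gen12 CSB event per the layout documented
 * above. All names here are made up for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct csb_fields {
	bool new_queue;			/* bit 0: switched to new queue */
	unsigned int to_ctx_id;		/* bits 15-25: sw context id switched to */
	unsigned int to_counter;	/* bits 26-31: sw counter switched to */
	unsigned int switch_detail;	/* bits 32-35: context switch detail */
	unsigned int away_ctx_id;	/* bits 47-57: sw context id switched away from */
	unsigned int away_counter;	/* bits 58-63: sw counter switched away from */
};

static struct csb_fields csb_decode(uint64_t csb)
{
	struct csb_fields f;

	f.new_queue = csb & 1;
	f.to_ctx_id = (csb >> 15) & 0x7ff;
	f.to_counter = (csb >> 26) & 0x3f;
	f.switch_detail = (csb >> 32) & 0xf;
	f.away_ctx_id = (csb >> 47) & 0x7ff;
	f.away_counter = (csb >> 58) & 0x3f;

	return f;
}

int main(void)
{
	/* bit 0 set: the GT switched to a new queue, detail 0 = ctx complete */
	struct csb_fields f = csb_decode(0x1);

	printf("new queue %d, switch detail %u, to ctx %u\n",
	       f.new_queue, f.switch_detail, f.to_ctx_id);
	return 0;
}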
-
-static inline bool gen8_csb_parse(const u64 csb)
-{
- return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
-}
-
-static noinline u64
-wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
-{
- u64 entry;
-
- /*
- * Reading from the HWSP has one particular advantage: we can detect
- * a stale entry. Since the write into HWSP is broken, we have no reason
- * to trust the HW at all, the mmio entry may equally be unordered, so
- * we prefer the path that is self-checking and as a last resort,
- * return the mmio value.
- *
- * tgl,dg1:HSDES#22011327657
- */
- preempt_disable();
- if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) {
- int idx = csb - engine->execlists.csb_status;
- int status;
-
- status = GEN8_EXECLISTS_STATUS_BUF;
- if (idx >= 6) {
- status = GEN11_EXECLISTS_STATUS_BUF2;
- idx -= 6;
- }
- status += sizeof(u64) * idx;
+ obj = i915_gem_object_create_shmem(engine->i915, context_size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
- entry = intel_uncore_read64(engine->uncore,
- _MMIO(engine->mmio_base + status));
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
}
- preempt_enable();
-
- return entry;
-}
-static inline u64
-csb_read(const struct intel_engine_cs *engine, u64 * const csb)
-{
- u64 entry = READ_ONCE(*csb);
-
- /*
- * Unfortunately, the GPU does not always serialise its write
- * of the CSB entries before its write of the CSB pointer, at least
- * from the perspective of the CPU, using what is known as a Global
- * Observation Point. We may read a new CSB tail pointer, but then
- * read the stale CSB entries, causing us to misinterpret the
- * context-switch events, and eventually declare the GPU hung.
- *
- * icl:HSDES#1806554093
- * tgl:HSDES#22011248461
- */
- if (unlikely(entry == -1))
- entry = wa_csb_read(engine, csb);
-
- /* Consume this entry so that we can spot its future reuse. */
- WRITE_ONCE(*csb, -1);
-
- /* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */
- return entry;
+ return vma;
}
-static void process_csb(struct intel_engine_cs *engine)
+static struct intel_timeline *
+pinned_timeline(struct intel_context *ce, struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- u64 * const buf = execlists->csb_status;
- const u8 num_entries = execlists->csb_size;
- u8 head, tail;
-
- /*
- * As we modify our execlists state tracking we require exclusive
- * access. Either we are inside the tasklet, or the tasklet is disabled
- * and we assume that is only inside the reset paths and so serialised.
- */
- GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
- !reset_in_progress(execlists));
- GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
-
- /*
- * Note that csb_write, csb_status may be either in HWSP or mmio.
- * When reading from the csb_write mmio register, we have to be
- * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
- * the low 4bits. As it happens we know the next 4bits are always
- * zero and so we can simply mask off the low u8 of the register
- * and treat it identically to reading from the HWSP (without having
- * to use explicit shifting and masking, and probably bifurcating
- * the code to handle the legacy mmio read).
- */
- head = execlists->csb_head;
- tail = READ_ONCE(*execlists->csb_write);
- if (unlikely(head == tail))
- return;
-
- /*
- * We will consume all events from HW, or at least pretend to.
- *
- * The sequence of events from the HW is deterministic, and derived
- * from our writes to the ELSP, with a smidgen of variability for
- * the arrival of the asynchronous requests wrt the inflight
- * execution. If the HW sends an event that does not correspond with
- * the one we are expecting, we have to abandon all hope as we lose
- * all tracking of what the engine is actually executing. We will
- * only detect we are out of sequence with the HW when we get an
- * 'impossible' event because we have already drained our own
- * preemption/promotion queue. If this occurs, we know that we likely
- * lost track of execution earlier and must unwind and restart; the
- * simplest way is to stop processing the event queue and force the
- * engine to reset.
- */
- execlists->csb_head = tail;
- ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
-
- /*
- * Hopefully paired with a wmb() in HW!
- *
- * We must complete the read of the write pointer before any reads
- * from the CSB, so that we do not see stale values. Without an rmb
- * (lfence) the HW may speculatively perform the CSB[] reads *before*
- * we perform the READ_ONCE(*csb_write).
- */
- rmb();
- do {
- bool promote;
- u64 csb;
-
- if (++head == num_entries)
- head = 0;
-
- /*
- * We are flying near dragons again.
- *
- * We hold a reference to the request in execlist_port[]
- * but no more than that. We are operating in softirq
- * context and so cannot hold any mutex or sleep. That
- * prevents us stopping the requests we are processing
- * in port[] from being retired simultaneously (the
- * breadcrumb will be complete before we see the
- * context-switch). As we only hold the reference to the
- * request, any pointer chasing underneath the request
- * is subject to a potential use-after-free. Thus we
- * store all of the bookkeeping within port[] as
- * required, and avoid using unguarded pointers beneath
- * request itself. The same applies to the atomic
- * status notifier.
- */
-
- csb = csb_read(engine, buf + head);
- ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
- head, upper_32_bits(csb), lower_32_bits(csb));
-
- if (INTEL_GEN(engine->i915) >= 12)
- promote = gen12_csb_parse(csb);
- else
- promote = gen8_csb_parse(csb);
- if (promote) {
- struct i915_request * const *old = execlists->active;
-
- if (GEM_WARN_ON(!*execlists->pending)) {
- execlists->error_interrupt |= ERROR_CSB;
- break;
- }
-
- ring_set_paused(engine, 0);
-
- /* Point active to the new ELSP; prevent overwriting */
- WRITE_ONCE(execlists->active, execlists->pending);
- smp_wmb(); /* notify execlists_active() */
-
- /* cancel old inflight, prepare for switch */
- trace_ports(execlists, "preempted", old);
- while (*old)
- execlists_schedule_out(*old++);
-
- /* switch pending to inflight */
- GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
- copy_ports(execlists->inflight,
- execlists->pending,
- execlists_num_ports(execlists));
- smp_wmb(); /* complete the seqlock */
- WRITE_ONCE(execlists->active, execlists->inflight);
-
- /* XXX Magic delay for tgl */
- ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
- WRITE_ONCE(execlists->pending[0], NULL);
- } else {
- if (GEM_WARN_ON(!*execlists->active)) {
- execlists->error_interrupt |= ERROR_CSB;
- break;
- }
-
- /* port0 completed, advanced to port1 */
- trace_ports(execlists, "completed", execlists->active);
-
- /*
- * We rely on the hardware being strongly
- * ordered, that the breadcrumb write is
- * coherent (visible from the CPU) before the
- * user interrupt is processed. One might assume
- * that the breadcrumb write being before the
- * user interrupt and the CS event for the context
- * switch would therefore be before the CS event
- * itself...
- */
- if (GEM_SHOW_DEBUG() &&
- !i915_request_completed(*execlists->active)) {
- struct i915_request *rq = *execlists->active;
- const u32 *regs __maybe_unused =
- rq->context->lrc_reg_state;
-
- ENGINE_TRACE(engine,
- "context completed before request!\n");
- ENGINE_TRACE(engine,
- "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
- ENGINE_READ(engine, RING_START),
- ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
- ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_MI_MODE));
- ENGINE_TRACE(engine,
- "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
- i915_ggtt_offset(rq->ring->vma),
- rq->head, rq->tail,
- rq->fence.context,
- lower_32_bits(rq->fence.seqno),
- hwsp_seqno(rq));
- ENGINE_TRACE(engine,
- "ctx:{start:%08x, head:%04x, tail:%04x}, ",
- regs[CTX_RING_START],
- regs[CTX_RING_HEAD],
- regs[CTX_RING_TAIL]);
- }
-
- execlists_schedule_out(*execlists->active++);
-
- GEM_BUG_ON(execlists->active - execlists->inflight >
- execlists_num_ports(execlists));
- }
- } while (head != tail);
-
- set_timeslice(engine);
-
- /*
- * Gen11 has proven to fail wrt global observation point between
- * entry and tail update, failing on the ordering and thus
- * we see an old entry in the context status buffer.
- *
- * Forcibly evict out entries for the next gpu csb update,
- * to increase the odds that we get fresh entries with non-working
- * hardware. The cost for doing so comes out mostly in the wash as
- * hardware, working or not, will need to do the
- * invalidation before.
- */
- invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
-}
+ struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
-static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
-{
- lockdep_assert_held(&engine->active.lock);
- if (!READ_ONCE(engine->execlists.pending[0])) {
- rcu_read_lock(); /* protect peeking at execlists->active */
- execlists_dequeue(engine);
- rcu_read_unlock();
- }
+ return intel_timeline_create_from_engine(engine, page_unmask_bits(tl));
}
-static void __execlists_hold(struct i915_request *rq)
+int lrc_alloc(struct intel_context *ce, struct intel_engine_cs *engine)
{
- LIST_HEAD(list);
-
- do {
- struct i915_dependency *p;
-
- if (i915_request_is_active(rq))
- __i915_request_unsubmit(rq);
-
- clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
- list_move_tail(&rq->sched.link, &rq->engine->active.hold);
- i915_request_set_hold(rq);
- RQ_TRACE(rq, "on hold\n");
-
- for_each_waiter(p, rq) {
- struct i915_request *w =
- container_of(p->waiter, typeof(*w), sched);
-
- /* Leave semaphores spinning on the other engines */
- if (w->engine != rq->engine)
- continue;
-
- if (!i915_request_is_ready(w))
- continue;
-
- if (i915_request_completed(w))
- continue;
-
- if (i915_request_on_hold(w))
- continue;
-
- list_move_tail(&w->sched.link, &list);
- }
-
- rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
- } while (rq);
-}
+ struct intel_ring *ring;
+ struct i915_vma *vma;
+ int err;
-static bool execlists_hold(struct intel_engine_cs *engine,
- struct i915_request *rq)
-{
- if (i915_request_on_hold(rq))
- return false;
+ GEM_BUG_ON(ce->state);
- spin_lock_irq(&engine->active.lock);
+ vma = __lrc_alloc_state(ce, engine);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- if (i915_request_completed(rq)) { /* too late! */
- rq = NULL;
- goto unlock;
+ ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
+ if (IS_ERR(ring)) {
+ err = PTR_ERR(ring);
+ goto err_vma;
}
- if (rq->engine != engine) { /* preempted virtual engine */
- struct virtual_engine *ve = to_virtual_engine(rq->engine);
-
- /*
- * intel_context_inflight() is only protected by virtue
- * of process_csb() being called only by the tasklet (or
- * directly from inside reset while the tasklet is suspended).
- * Assert that neither of those are allowed to run while we
- * poke at the request queues.
- */
- GEM_BUG_ON(!reset_in_progress(&engine->execlists));
+ if (!page_mask_bits(ce->timeline)) {
+ struct intel_timeline *tl;
/*
- * An unsubmitted request along a virtual engine will
- * remain on the active (this) engine until we are able
- * to process the context switch away (and so mark the
- * context as no longer in flight). That cannot have happened
- * yet, otherwise we would not be hanging!
+ * Use the static global HWSP for the kernel context, and
+ * a dynamically allocated cacheline for everyone else.
*/
- spin_lock(&ve->base.active.lock);
- GEM_BUG_ON(intel_context_inflight(rq->context) != engine);
- GEM_BUG_ON(ve->request != rq);
- ve->request = NULL;
- spin_unlock(&ve->base.active.lock);
- i915_request_put(rq);
-
- rq->engine = engine;
- }
-
- /*
- * Transfer this request onto the hold queue to prevent it
- * being resubmitted to HW (and potentially completed) before we have
- * released it. Since we may have already submitted following
- * requests, we need to remove those as well.
- */
- GEM_BUG_ON(i915_request_on_hold(rq));
- GEM_BUG_ON(rq->engine != engine);
- __execlists_hold(rq);
- GEM_BUG_ON(list_empty(&engine->active.hold));
-
-unlock:
- spin_unlock_irq(&engine->active.lock);
- return rq;
-}
-
-static bool hold_request(const struct i915_request *rq)
-{
- struct i915_dependency *p;
- bool result = false;
-
- /*
- * If one of our ancestors is on hold, we must also be on hold,
- * otherwise we will bypass it and execute before it.
- */
- rcu_read_lock();
- for_each_signaler(p, rq) {
- const struct i915_request *s =
- container_of(p->signaler, typeof(*s), sched);
-
- if (s->engine != rq->engine)
- continue;
-
- result = i915_request_on_hold(s);
- if (result)
- break;
- }
- rcu_read_unlock();
-
- return result;
-}
-
-static void __execlists_unhold(struct i915_request *rq)
-{
- LIST_HEAD(list);
-
- do {
- struct i915_dependency *p;
-
- RQ_TRACE(rq, "hold release\n");
-
- GEM_BUG_ON(!i915_request_on_hold(rq));
- GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
-
- i915_request_clear_hold(rq);
- list_move_tail(&rq->sched.link,
- i915_sched_lookup_priolist(rq->engine,
- rq_prio(rq)));
- set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
- /* Also release any children on this engine that are ready */
- for_each_waiter(p, rq) {
- struct i915_request *w =
- container_of(p->waiter, typeof(*w), sched);
-
- /* Propagate any change in error status */
- if (rq->fence.error)
- i915_request_set_error_once(w, rq->fence.error);
-
- if (w->engine != rq->engine)
- continue;
-
- if (!i915_request_on_hold(w))
- continue;
-
- /* Check that no other parents are also on hold */
- if (hold_request(w))
- continue;
-
- list_move_tail(&w->sched.link, &list);
- }
-
- rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
- } while (rq);
-}
-
-static void execlists_unhold(struct intel_engine_cs *engine,
- struct i915_request *rq)
-{
- spin_lock_irq(&engine->active.lock);
-
- /*
- * Move this request back to the priority queue, and all of its
- * children and grandchildren that were suspended along with it.
- */
- __execlists_unhold(rq);
-
- if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
- engine->execlists.queue_priority_hint = rq_prio(rq);
- tasklet_hi_schedule(&engine->execlists.tasklet);
- }
-
- spin_unlock_irq(&engine->active.lock);
-}
-
-struct execlists_capture {
- struct work_struct work;
- struct i915_request *rq;
- struct i915_gpu_coredump *error;
-};
-
-static void execlists_capture_work(struct work_struct *work)
-{
- struct execlists_capture *cap = container_of(work, typeof(*cap), work);
- const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
- struct intel_engine_cs *engine = cap->rq->engine;
- struct intel_gt_coredump *gt = cap->error->gt;
- struct intel_engine_capture_vma *vma;
-
- /* Compress all the objects attached to the request, slow! */
- vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
- if (vma) {
- struct i915_vma_compress *compress =
- i915_vma_capture_prepare(gt);
-
- intel_engine_coredump_add_vma(gt->engine, vma, compress);
- i915_vma_capture_finish(gt, compress);
- }
-
- gt->simulated = gt->engine->simulated;
- cap->error->simulated = gt->simulated;
-
- /* Publish the error state, and announce it to the world */
- i915_error_state_store(cap->error);
- i915_gpu_coredump_put(cap->error);
-
- /* Return this request and all that depend upon it for signaling */
- execlists_unhold(engine, cap->rq);
- i915_request_put(cap->rq);
-
- kfree(cap);
-}
-
-static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
-{
- const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
- struct execlists_capture *cap;
-
- cap = kmalloc(sizeof(*cap), gfp);
- if (!cap)
- return NULL;
-
- cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
- if (!cap->error)
- goto err_cap;
-
- cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
- if (!cap->error->gt)
- goto err_gpu;
-
- cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
- if (!cap->error->gt->engine)
- goto err_gt;
-
- cap->error->gt->engine->hung = true;
-
- return cap;
-
-err_gt:
- kfree(cap->error->gt);
-err_gpu:
- kfree(cap->error);
-err_cap:
- kfree(cap);
- return NULL;
-}
-
-static struct i915_request *
-active_context(struct intel_engine_cs *engine, u32 ccid)
-{
- const struct intel_engine_execlists * const el = &engine->execlists;
- struct i915_request * const *port, *rq;
-
- /*
- * Use the most recent result from process_csb(), but just in case
- * we trigger an error (via interrupt) before the first CS event has
- * been written, peek at the next submission.
- */
-
- for (port = el->active; (rq = *port); port++) {
- if (rq->context->lrc.ccid == ccid) {
- ENGINE_TRACE(engine,
- "ccid found at active:%zd\n",
- port - el->active);
- return rq;
- }
- }
-
- for (port = el->pending; (rq = *port); port++) {
- if (rq->context->lrc.ccid == ccid) {
- ENGINE_TRACE(engine,
- "ccid found at pending:%zd\n",
- port - el->pending);
- return rq;
+ if (unlikely(ce->timeline))
+ tl = pinned_timeline(ce, engine);
+ else
+ tl = intel_timeline_create(engine->gt);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto err_ring;
}
- }
-
- ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
- return NULL;
-}
-
-static u32 active_ccid(struct intel_engine_cs *engine)
-{
- return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
-}
-
-static void execlists_capture(struct intel_engine_cs *engine)
-{
- struct execlists_capture *cap;
-
- if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
- return;
-
- /*
- * We need to _quickly_ capture the engine state before we reset.
- * We are inside an atomic section (softirq) here and we are delaying
- * the forced preemption event.
- */
- cap = capture_regs(engine);
- if (!cap)
- return;
- spin_lock_irq(&engine->active.lock);
- cap->rq = active_context(engine, active_ccid(engine));
- if (cap->rq) {
- cap->rq = active_request(cap->rq->context->timeline, cap->rq);
- cap->rq = i915_request_get_rcu(cap->rq);
+ ce->timeline = tl;
}
- spin_unlock_irq(&engine->active.lock);
- if (!cap->rq)
- goto err_free;
-
- /*
- * Remove the request from the execlists queue, and take ownership
- * of the request. We pass it to our worker who will _slowly_ compress
- * all the pages the _user_ requested for debugging their batch, after
- * which we return it to the queue for signaling.
- *
- * By removing them from the execlists queue, we also remove the
- * requests from being processed by __unwind_incomplete_requests()
- * during the intel_engine_reset(), and so they will *not* be replayed
- * afterwards.
- *
- * Note that because we have not yet reset the engine at this point,
- * it is possible that the request we have identified as being
- * guilty did in fact complete and we will then hit an arbitration
- * point allowing the outstanding preemption to succeed. The likelihood
- * of that is very low (as capturing of the engine registers should be
- * fast enough to run inside an irq-off atomic section!), so we will
- * simply hold that request accountable for being non-preemptible
- * long enough to force the reset.
- */
- if (!execlists_hold(engine, cap->rq))
- goto err_rq;
-
- INIT_WORK(&cap->work, execlists_capture_work);
- schedule_work(&cap->work);
- return;
-
-err_rq:
- i915_request_put(cap->rq);
-err_free:
- i915_gpu_coredump_put(cap->error);
- kfree(cap);
-}
-
-static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
-{
- const unsigned int bit = I915_RESET_ENGINE + engine->id;
- unsigned long *lock = &engine->gt->reset.flags;
- if (!intel_has_reset_engine(engine->gt))
- return;
-
- if (test_and_set_bit(bit, lock))
- return;
-
- ENGINE_TRACE(engine, "reset for %s\n", msg);
-
- /* Mark this tasklet as disabled to avoid waiting for it to complete */
- tasklet_disable_nosync(&engine->execlists.tasklet);
+ ce->ring = ring;
+ ce->state = vma;
- ring_set_paused(engine, 1); /* Freeze the current request in place */
- execlists_capture(engine);
- intel_engine_reset(engine, msg);
+ return 0;
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(bit, lock);
+err_ring:
+ intel_ring_put(ring);
+err_vma:
+ i915_vma_put(vma);
+ return err;
}
-static bool preempt_timeout(const struct intel_engine_cs *const engine)
+void lrc_reset(struct intel_context *ce)
{
- const struct timer_list *t = &engine->execlists.preempt;
-
- if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
- return false;
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
- if (!timer_expired(t))
- return false;
+ intel_ring_reset(ce->ring, ce->ring->emit);
- return READ_ONCE(engine->execlists.pending[0]);
+ /* Scrub away the garbage */
+ lrc_init_regs(ce, ce->engine, true);
+ ce->lrc.lrca = lrc_update_regs(ce, ce->engine, ce->ring->tail);
}
-/*
- * Check the unread Context Status Buffers and manage the submission of new
- * contexts to the ELSP accordingly.
- */
-static void execlists_submission_tasklet(unsigned long data)
+int
+lrc_pre_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
- bool timeout = preempt_timeout(engine);
-
- process_csb(engine);
-
- if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
- const char *msg;
-
- /* Generate the error message in priority wrt the user! */
- if (engine->execlists.error_interrupt & GENMASK(15, 0))
- msg = "CS error"; /* thrown by a user payload */
- else if (engine->execlists.error_interrupt & ERROR_CSB)
- msg = "invalid CSB event";
- else
- msg = "internal error";
-
- engine->execlists.error_interrupt = 0;
- execlists_reset(engine, msg);
- }
-
- if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
- unsigned long flags;
+ GEM_BUG_ON(!ce->state);
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
- spin_lock_irqsave(&engine->active.lock, flags);
- __execlists_submission_tasklet(engine);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ *vaddr = i915_gem_object_pin_map(ce->state->obj,
+ i915_coherent_map_type(ce->engine->i915) |
+ I915_MAP_OVERRIDE);
- /* Recheck after serialising with direct-submission */
- if (unlikely(timeout && preempt_timeout(engine))) {
- cancel_timer(&engine->execlists.preempt);
- execlists_reset(engine, "preemption time out");
- }
- }
+ return PTR_ERR_OR_ZERO(*vaddr);
}
-static void __execlists_kick(struct intel_engine_execlists *execlists)
+int
+lrc_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *vaddr)
{
- /* Kick the tasklet for some interrupt coalescing and reset handling */
- tasklet_hi_schedule(&execlists->tasklet);
-}
+ ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
-#define execlists_kick(t, member) \
- __execlists_kick(container_of(t, struct intel_engine_execlists, member))
+ if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags))
+ lrc_init_state(ce, engine, vaddr);
-static void execlists_timeslice(struct timer_list *timer)
-{
- execlists_kick(timer, timer);
-}
-
-static void execlists_preempt(struct timer_list *timer)
-{
- execlists_kick(timer, preempt);
+ ce->lrc.lrca = lrc_update_regs(ce, engine, ce->ring->tail);
+ return 0;
}
-static void queue_request(struct intel_engine_cs *engine,
- struct i915_request *rq)
+void lrc_unpin(struct intel_context *ce)
{
- GEM_BUG_ON(!list_empty(&rq->sched.link));
- list_add_tail(&rq->sched.link,
- i915_sched_lookup_priolist(engine, rq_prio(rq)));
- set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
+ ce->engine);
}
-static void __submit_queue_imm(struct intel_engine_cs *engine)
+void lrc_post_unpin(struct intel_context *ce)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
- if (reset_in_progress(execlists))
- return; /* defer until we restart the engine following reset */
-
- __execlists_submission_tasklet(engine);
+ i915_gem_object_unpin_map(ce->state->obj);
}
-static void submit_queue(struct intel_engine_cs *engine,
- const struct i915_request *rq)
+void lrc_fini(struct intel_context *ce)
{
- struct intel_engine_execlists *execlists = &engine->execlists;
-
- if (rq_prio(rq) <= execlists->queue_priority_hint)
+ if (!ce->state)
return;
- execlists->queue_priority_hint = rq_prio(rq);
- __submit_queue_imm(engine);
+ intel_ring_put(fetch_and_zero(&ce->ring));
+ i915_vma_put(fetch_and_zero(&ce->state));
}
-static bool ancestor_on_hold(const struct intel_engine_cs *engine,
- const struct i915_request *rq)
-{
- GEM_BUG_ON(i915_request_on_hold(rq));
- return !list_empty(&engine->active.hold) && hold_request(rq);
-}
-
-static void flush_csb(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists *el = &engine->execlists;
-
- if (READ_ONCE(el->pending[0]) && tasklet_trylock(&el->tasklet)) {
- if (!reset_in_progress(el))
- process_csb(engine);
- tasklet_unlock(&el->tasklet);
- }
-}
-
-static void execlists_submit_request(struct i915_request *request)
-{
- struct intel_engine_cs *engine = request->engine;
- unsigned long flags;
-
- /* Hopefully we clear execlists->pending[] to let us through */
- flush_csb(engine);
-
- /* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->active.lock, flags);
-
- if (unlikely(ancestor_on_hold(engine, request))) {
- RQ_TRACE(request, "ancestor on hold\n");
- list_add_tail(&request->sched.link, &engine->active.hold);
- i915_request_set_hold(request);
- } else {
- queue_request(engine, request);
-
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
- GEM_BUG_ON(list_empty(&request->sched.link));
-
- submit_queue(engine, request);
- }
-
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void __execlists_context_fini(struct intel_context *ce)
-{
- intel_ring_put(ce->ring);
- i915_vma_put(ce->state);
-}
-
-static void execlists_context_destroy(struct kref *kref)
+void lrc_destroy(struct kref *kref)
{
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
GEM_BUG_ON(!i915_active_is_idle(&ce->active));
GEM_BUG_ON(intel_context_is_pinned(ce));
- if (ce->state)
- __execlists_context_fini(ce);
+ lrc_fini(ce);
intel_context_fini(ce);
intel_context_free(ce);
}
-static void
-set_redzone(void *vaddr, const struct intel_engine_cs *engine)
-{
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- return;
-
- vaddr += engine->context_size;
-
- memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
-}
-
-static void
-check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
-{
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- return;
-
- vaddr += engine->context_size;
-
- if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
- drm_err_once(&engine->i915->drm,
- "%s context redzone overwritten!\n",
- engine->name);
-}
-
-static void execlists_context_unpin(struct intel_context *ce)
-{
- check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
- ce->engine);
-}
-
-static void execlists_context_post_unpin(struct intel_context *ce)
-{
- i915_gem_object_unpin_map(ce->state->obj);
-}
-
static u32 *
gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs)
{
@@ -3465,7 +1035,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
return cs;
}
-static inline u32 context_wa_bb_offset(const struct intel_context *ce)
+static u32 context_wa_bb_offset(const struct intel_context *ce)
{
return PAGE_SIZE * ce->wa_bb_page;
}
@@ -3496,16 +1066,57 @@ setup_indirect_ctx_bb(const struct intel_context *ce,
while ((unsigned long)cs % CACHELINE_BYTES)
*cs++ = MI_NOOP;
- lrc_ring_setup_indirect_ctx(ce->lrc_reg_state, engine,
- i915_ggtt_offset(ce->state) +
- context_wa_bb_offset(ce),
- (cs - start) * sizeof(*cs));
+ lrc_setup_indirect_ctx(ce->lrc_reg_state, engine,
+ i915_ggtt_offset(ce->state) +
+ context_wa_bb_offset(ce),
+ (cs - start) * sizeof(*cs));
}
-static void
-__execlists_update_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- u32 head)
+/*
+ * The context descriptor encodes various attributes of a context,
+ * including its GTT address and some flags. Because it's fairly
+ * expensive to calculate, we'll just do it once and cache the result,
+ * which remains valid until the context is unpinned.
+ *
+ * This is what a descriptor looks like, from LSB to MSB::
+ *
+ * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
+ * bits 12-31: LRCA, GTT address of (the HWSP of) this context
+ * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
+ * bits 53-54: mbz, reserved for use by hardware
+ * bits 55-63: group ID, currently unused and set to 0
+ *
+ * Starting from Gen11, the upper dword of the descriptor has a new format:
+ *
+ * bits 32-36: reserved
+ * bits 37-47: SW context ID
+ * bits 48-53: engine instance
+ * bit 54: mbz, reserved for use by hardware
+ * bits 55-60: SW counter
+ * bits 61-63: engine class
+ *
+ * engine info, SW context ID and SW counter need to form a unique number
+ * (Context ID) per lrc.
+ */
+static u32 lrc_descriptor(const struct intel_context *ce)
+{
+ u32 desc;
+
+ desc = INTEL_LEGACY_32B_CONTEXT;
+ if (i915_vm_is_4lvl(ce->vm))
+ desc = INTEL_LEGACY_64B_CONTEXT;
+ desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
+
+ desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
+ if (IS_GEN(ce->vm->i915, 8))
+ desc |= GEN8_CTX_L3LLC_COHERENT;
+
+ return i915_ggtt_offset(ce->state) | desc;
+}
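As a rough standalone sketch of the packing described in the comment above (flags in bits 0-11, the page-aligned LRCA in bits 12-31), the low dword can be formed by a plain OR as long as the GTT offset is 4KiB aligned; the helper name and the values below are hypothetical, not kernel API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: pack flags (bits 0-11) and LRCA (bits 12-31). */
static uint32_t pack_descriptor(uint32_t ggtt_offset, uint32_t flags)
{
	assert((ggtt_offset & 0xfffu) == 0);	/* LRCA must be page aligned */
	assert((flags & ~0xfffu) == 0);		/* flags occupy bits 0-11 only */

	return ggtt_offset | flags;
}

int main(void)
{
	/* hypothetical offset and flag bits, purely to show the layout */
	printf("desc = 0x%08x\n", pack_descriptor(0x00fed000u, 0x019u));
	return 0;
}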
+
+u32 lrc_update_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 head)
{
struct intel_ring *ring = ce->ring;
u32 *regs = ce->lrc_reg_state;
@@ -3537,205 +1148,54 @@ __execlists_update_reg_state(const struct intel_context *ce,
GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
setup_indirect_ctx_bb(ce, engine, fn);
}
-}
-static int
-execlists_context_pre_pin(struct intel_context *ce,
- struct i915_gem_ww_ctx *ww, void **vaddr)
-{
- GEM_BUG_ON(!ce->state);
- GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
-
- *vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(ce->engine->i915) |
- I915_MAP_OVERRIDE);
-
- return PTR_ERR_OR_ZERO(*vaddr);
-}
-
-static int
-__execlists_context_pin(struct intel_context *ce,
- struct intel_engine_cs *engine,
- void *vaddr)
-{
- ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
- ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
- __execlists_update_reg_state(ce, engine, ce->ring->tail);
-
- return 0;
-}
-
-static int execlists_context_pin(struct intel_context *ce, void *vaddr)
-{
- return __execlists_context_pin(ce, ce->engine, vaddr);
+ return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;
}
-static int execlists_context_alloc(struct intel_context *ce)
+void lrc_update_offsets(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
- return __execlists_context_alloc(ce, ce->engine);
+ set_offsets(ce->lrc_reg_state, reg_offsets(engine), engine, false);
}
-static void execlists_context_reset(struct intel_context *ce)
+void lrc_check_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const char *when)
{
- CE_TRACE(ce, "reset\n");
- GEM_BUG_ON(!intel_context_is_pinned(ce));
-
- intel_ring_reset(ce->ring, ce->ring->emit);
-
- /* Scrub away the garbage */
- execlists_init_reg_state(ce->lrc_reg_state,
- ce, ce->engine, ce->ring, true);
- __execlists_update_reg_state(ce, ce->engine, ce->ring->tail);
-
- ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
-}
-
-static const struct intel_context_ops execlists_context_ops = {
- .alloc = execlists_context_alloc,
-
- .pre_pin = execlists_context_pre_pin,
- .pin = execlists_context_pin,
- .unpin = execlists_context_unpin,
- .post_unpin = execlists_context_post_unpin,
-
- .enter = intel_context_enter_engine,
- .exit = intel_context_exit_engine,
-
- .reset = execlists_context_reset,
- .destroy = execlists_context_destroy,
-};
-
-static u32 hwsp_offset(const struct i915_request *rq)
-{
- const struct intel_timeline_cacheline *cl;
-
- /* Before the request is executed, the timeline/cacheline is fixed */
-
- cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
- if (cl)
- return cl->ggtt_offset;
-
- return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
-}
-
-static int gen8_emit_init_breadcrumb(struct i915_request *rq)
-{
- u32 *cs;
-
- GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
- if (!i915_request_timeline(rq)->has_initial_breadcrumb)
- return 0;
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /*
- * Check if we have been preempted before we even get started.
- *
- * After this point i915_request_started() reports true, even if
- * we get preempted and so are no longer running.
- */
- *cs++ = MI_ARB_CHECK;
- *cs++ = MI_NOOP;
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = hwsp_offset(rq);
- *cs++ = 0;
- *cs++ = rq->fence.seqno - 1;
-
- intel_ring_advance(rq, cs);
-
- /* Record the updated position of the request's payload */
- rq->infix = intel_ring_offset(rq, cs);
-
- __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
-
- return 0;
-}
-
-static int emit_pdps(struct i915_request *rq)
-{
- const struct intel_engine_cs * const engine = rq->engine;
- struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
- int err, i;
- u32 *cs;
-
- GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
-
- /*
- * Beware ye of the dragons, this sequence is magic!
- *
- * Small changes to this sequence can cause anything from
- * GPU hangs to forcewake errors and machine lockups!
- */
-
- /* Flush any residual operations from the context load */
- err = engine->emit_flush(rq, EMIT_FLUSH);
- if (err)
- return err;
+ const struct intel_ring *ring = ce->ring;
+ u32 *regs = ce->lrc_reg_state;
+ bool valid = true;
+ int x;
- /* Magic required to prevent forcewake errors! */
- err = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- return err;
-
- cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Ensure the LRI have landed before we invalidate & continue */
- *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
- for (i = GEN8_3LVL_PDPES; i--; ) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- u32 base = engine->mmio_base;
-
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
- *cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
- *cs++ = lower_32_bits(pd_daddr);
+ if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
+ pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_START],
+ i915_ggtt_offset(ring->vma));
+ regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
+ valid = false;
}
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int execlists_request_alloc(struct i915_request *request)
-{
- int ret;
-
- GEM_BUG_ON(!intel_context_is_pinned(request->context));
-
- /*
- * Flush enough space to reduce the likelihood of waiting after
- * we start building the request - in which case we will just
- * have to repeat work.
- */
- request->reserved_space += EXECLISTS_REQUEST_SIZE;
-
- /*
- * Note that after this point, we have committed to using
- * this request as it is being used to both track the
- * state of engine initialisation and liveness of the
- * golden renderstate above. Think twice before you try
- * to cancel/unwind this request now.
- */
-
- if (!i915_vm_is_4lvl(request->context->vm)) {
- ret = emit_pdps(request);
- if (ret)
- return ret;
+ if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
+ (RING_CTL_SIZE(ring->size) | RING_VALID)) {
+ pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_CTL],
+ (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+ valid = false;
}
- /* Unconditionally invalidate GPU caches and TLBs. */
- ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
- if (ret)
- return ret;
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
+ pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
+ engine->name, regs[x + 1]);
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
+ valid = false;
+ }
- request->reserved_space -= EXECLISTS_REQUEST_SIZE;
- return 0;
+ WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
}
/*
@@ -3955,7 +1415,7 @@ gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
return batch;
}
-#define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE)
+#define CTX_WA_BB_SIZE (PAGE_SIZE)
static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
{
@@ -3963,7 +1423,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE);
+ obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -3985,30 +1445,34 @@ err:
return err;
}
-static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
+void lrc_fini_wa_ctx(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+
+ /* Called on error unwind, clear all flags to prevent further use */
+ memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
}
typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
-static int intel_init_workaround_bb(struct intel_engine_cs *engine)
+void lrc_init_wa_ctx(struct intel_engine_cs *engine)
{
struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
- struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
- &wa_ctx->per_ctx };
- wa_bb_func_t wa_bb_fn[2];
+ struct i915_wa_ctx_bb *wa_bb[] = {
+ &wa_ctx->indirect_ctx, &wa_ctx->per_ctx
+ };
+ wa_bb_func_t wa_bb_fn[ARRAY_SIZE(wa_bb)];
void *batch, *batch_ptr;
unsigned int i;
- int ret;
+ int err;
if (engine->class != RENDER_CLASS)
- return 0;
+ return;
switch (INTEL_GEN(engine->i915)) {
case 12:
case 11:
- return 0;
+ return;
case 10:
wa_bb_fn[0] = gen10_init_indirectctx_bb;
wa_bb_fn[1] = NULL;
@@ -4023,14 +1487,20 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
break;
default:
MISSING_CASE(INTEL_GEN(engine->i915));
- return 0;
+ return;
}
- ret = lrc_setup_wa_ctx(engine);
- if (ret) {
- drm_dbg(&engine->i915->drm,
- "Failed to setup context WA page: %d\n", ret);
- return ret;
+ err = lrc_setup_wa_ctx(engine);
+ if (err) {
+ /*
+ * We continue even if we fail to initialize the WA batch,
+ * because we only expect rare glitches and nothing critical
+ * enough to prevent us from using the GPU.
+ */
+ drm_err(&engine->i915->drm,
+ "Ignoring context switch w/a allocation error:%d\n",
+ err);
+ return;
}
batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
@@ -4045,2104 +1515,52 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
wa_bb[i]->offset = batch_ptr - batch;
if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
CACHELINE_BYTES))) {
- ret = -EINVAL;
+ err = -EINVAL;
break;
}
if (wa_bb_fn[i])
batch_ptr = wa_bb_fn[i](engine, batch_ptr);
wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
}
- GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
+ GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_SIZE);
__i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
__i915_gem_object_release_map(wa_ctx->vma->obj);
- if (ret)
- lrc_destroy_wa_ctx(engine);
-
- return ret;
-}
-
-static void reset_csb_pointers(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- const unsigned int reset_value = execlists->csb_size - 1;
-
- ring_set_paused(engine, 0);
-
- /*
- * Sometimes Icelake forgets to reset its pointers on a GPU reset.
- * Bludgeon them with a mmio update to be sure.
- */
- ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
- 0xffff << 16 | reset_value << 8 | reset_value);
- ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
- /*
- * After a reset, the HW starts writing into CSB entry [0]. We
- * therefore have to set our HEAD pointer back one entry so that
- * the *first* entry we check is entry 0. To complicate this further,
- * as we don't wait for the first interrupt after reset, we have to
- * fake the HW write to point back to the last entry so that our
- * inline comparison of our cached head position against the last HW
- * write works even before the first interrupt.
- */
- execlists->csb_head = reset_value;
- WRITE_ONCE(*execlists->csb_write, reset_value);
- wmb(); /* Make sure this is visible to HW (paranoia?) */
-
- /* Check that the GPU does indeed update the CSB entries! */
- memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
- invalidate_csb_entries(&execlists->csb_status[0],
- &execlists->csb_status[reset_value]);
-
- /* Once more for luck and our trusty paranoia */
- ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
- 0xffff << 16 | reset_value << 8 | reset_value);
- ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
- GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
-}
-
-static void execlists_sanitize(struct intel_engine_cs *engine)
-{
- GEM_BUG_ON(execlists_active(&engine->execlists));
-
- /*
- * Poison residual state on resume, in case the suspend didn't!
- *
- * We have to assume that across suspend/resume (or other loss
- * of control) the contents of our pinned buffers have been
- * lost, replaced by garbage. Since this doesn't always happen,
- * let's poison such state so that we more quickly spot when
- * we falsely assume it has been preserved.
- */
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
-
- reset_csb_pointers(engine);
-
- /*
- * The kernel_context HWSP is stored in the status_page. As above,
- * that may be lost on resume/initialisation, and so we need to
- * reset the value in the HWSP.
- */
- intel_timeline_reset_seqno(engine->kernel_context->timeline);
-
- /* And scrub the dirty cachelines for the HWSP */
- clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
-}
-
-static void enable_error_interrupt(struct intel_engine_cs *engine)
-{
- u32 status;
-
- engine->execlists.error_interrupt = 0;
- ENGINE_WRITE(engine, RING_EMR, ~0u);
- ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
-
- status = ENGINE_READ(engine, RING_ESR);
- if (unlikely(status)) {
- drm_err(&engine->i915->drm,
- "engine '%s' resumed still in error: %08x\n",
- engine->name, status);
- __intel_gt_reset(engine->gt, engine->mask);
- }
-
- /*
- * On current gen8+, we have 2 signals to play with
- *
- * - I915_ERROR_INSTRUCTION (bit 0)
- *
- * Generate an error if the command parser encounters an invalid
- * instruction
- *
- * This is a fatal error.
- *
- * - CP_PRIV (bit 2)
- *
- * Generate an error on privilege violation (where the CP replaces
- * the instruction with a no-op). This also fires for writes into
- * read-only scratch pages.
- *
- * This is a non-fatal error, parsing continues.
- *
- * * there are a few others defined for odd HW that we do not use
- *
- * Since CP_PRIV fires for cases where we have chosen to ignore the
- * error (as the HW is validating and suppressing the mistakes), we
- * only unmask the instruction error bit.
- */
- ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
-}
-
-static void enable_execlists(struct intel_engine_cs *engine)
-{
- u32 mode;
-
- assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
-
- intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
-
- if (INTEL_GEN(engine->i915) >= 11)
- mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
- else
- mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
- ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
-
- ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
-
- ENGINE_WRITE_FW(engine,
- RING_HWS_PGA,
- i915_ggtt_offset(engine->status_page.vma));
- ENGINE_POSTING_READ(engine, RING_HWS_PGA);
-
- enable_error_interrupt(engine);
-
- engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
-}
-
-static bool unexpected_starting_state(struct intel_engine_cs *engine)
-{
- bool unexpected = false;
-
- if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
- drm_dbg(&engine->i915->drm,
- "STOP_RING still set in RING_MI_MODE\n");
- unexpected = true;
- }
-
- return unexpected;
-}
-
-static int execlists_resume(struct intel_engine_cs *engine)
-{
- intel_mocs_init_engine(engine);
-
- intel_breadcrumbs_reset(engine->breadcrumbs);
-
- if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
- struct drm_printer p = drm_debug_printer(__func__);
-
- intel_engine_dump(engine, &p, NULL);
- }
-
- enable_execlists(engine);
-
- return 0;
-}
-static void execlists_reset_prepare(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- unsigned long flags;
-
- ENGINE_TRACE(engine, "depth<-%d\n",
- atomic_read(&execlists->tasklet.count));
-
- /*
- * Prevent request submission to the hardware until we have
- * completed the reset in i915_gem_reset_finish(). If a request
- * is completed by one engine, it may then queue a request
- * to a second via its execlists->tasklet *just* as we are
- * calling engine->resume() and also writing the ELSP.
- * Turning off the execlists->tasklet until the reset is over
- * prevents the race.
- */
- __tasklet_disable_sync_once(&execlists->tasklet);
- GEM_BUG_ON(!reset_in_progress(execlists));
-
- /* And flush any current direct submission. */
- spin_lock_irqsave(&engine->active.lock, flags);
- spin_unlock_irqrestore(&engine->active.lock, flags);
-
- /*
- * We stop engines, otherwise we might get failed reset and a
- * dead gpu (on elk). Also, a gpu as modern as kbl can suffer
- * from a system hang if a batchbuffer is progressing when
- * the reset is issued, regardless of READY_TO_RESET ack.
- * Thus assume it is best to stop engines on all gens
- * where we have a gpu reset.
- *
- * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
- *
- * FIXME: Wa for more modern gens needs to be validated
- */
- ring_set_paused(engine, 1);
- intel_engine_stop_cs(engine);
-
- engine->execlists.reset_ccid = active_ccid(engine);
-}
-
-static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
-{
- int x;
-
- x = lrc_ring_mi_mode(engine);
- if (x != -1) {
- regs[x + 1] &= ~STOP_RING;
- regs[x + 1] |= STOP_RING << 16;
- }
-}
-
-static void __execlists_reset_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine)
-{
- u32 *regs = ce->lrc_reg_state;
-
- __reset_stop_ring(regs, engine);
+ /* Verify that we can handle failure to setup the wa_ctx */
+ if (err || i915_inject_probe_error(engine->i915, -ENODEV))
+ lrc_fini_wa_ctx(engine);
}
-static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct intel_context *ce;
- struct i915_request *rq;
- u32 head;
-
- mb(); /* paranoia: read the CSB pointers from after the reset */
- clflush(execlists->csb_write);
- mb();
-
- process_csb(engine); /* drain preemption events */
-
- /* Following the reset, we need to reload the CSB read/write pointers */
- reset_csb_pointers(engine);
-
- /*
- * Save the currently executing context; even if we completed
- * its request, it was still running at the time of the
- * reset and will have been clobbered.
- */
- rq = active_context(engine, engine->execlists.reset_ccid);
- if (!rq)
- goto unwind;
-
- ce = rq->context;
- GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
-
- if (i915_request_completed(rq)) {
- /* Idle context; tidy up the ring so we can restart afresh */
- head = intel_ring_wrap(ce->ring, rq->tail);
- goto out_replay;
- }
-
- /* We still have requests in-flight; the engine should be active */
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
- /* Context has requests still in-flight; it should not be idle! */
- GEM_BUG_ON(i915_active_is_idle(&ce->active));
-
- rq = active_request(ce->timeline, rq);
- head = intel_ring_wrap(ce->ring, rq->head);
- GEM_BUG_ON(head == ce->ring->tail);
-
- /*
- * If this request hasn't started yet, e.g. it is waiting on a
- * semaphore, we need to avoid skipping the request or else we
- * break the signaling chain. However, if the context is corrupt
- * the request will not restart and we will be stuck with a wedged
- * device. It is quite often the case that if we issue a reset
- * while the GPU is loading the context image, the context
- * image becomes corrupt.
- *
- * Otherwise, if we have not started yet, the request should replay
- * perfectly and we do not need to flag the result as being erroneous.
- */
- if (!i915_request_started(rq))
- goto out_replay;
-
- /*
- * If the request was innocent, we leave the request in the ELSP
- * and will try to replay it on restarting. The context image may
- * have been corrupted by the reset, in which case we may have
- * to service a new GPU hang, but more likely we can continue on
- * without impact.
- *
- * If the request was guilty, we presume the context is corrupt
- * and have to at least restore the RING register in the context
- * image back to the expected values to skip over the guilty request.
- */
- __i915_request_reset(rq, stalled);
-
- /*
- * We want a simple context + ring to execute the breadcrumb update.
- * We cannot rely on the context being intact across the GPU hang,
- * so clear it and rebuild just what we need for the breadcrumb.
- * All pending requests for this context will be zapped, and any
- * future request will be after userspace has had the opportunity
- * to recreate its own state.
- */
-out_replay:
- ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
- head, ce->ring->tail);
- __execlists_reset_reg_state(ce, engine);
- __execlists_update_reg_state(ce, engine, head);
- ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
-
-unwind:
- /* Push back any incomplete requests for replay after the reset. */
- cancel_port_requests(execlists);
- __unwind_incomplete_requests(engine);
-}
-
-static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
-{
- unsigned long flags;
-
- ENGINE_TRACE(engine, "\n");
-
- spin_lock_irqsave(&engine->active.lock, flags);
-
- __execlists_reset(engine, stalled);
-
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void nop_submission_tasklet(unsigned long data)
-{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
-
- /* The driver is wedged; don't process any more events. */
- WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
-}
-
-static void execlists_reset_cancel(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request *rq, *rn;
- struct rb_node *rb;
- unsigned long flags;
-
- ENGINE_TRACE(engine, "\n");
-
- /*
- * Before we call engine->cancel_requests(), we should have exclusive
- * access to the submission state. This is arranged for us by the
- * caller disabling the interrupt generation, the tasklet and other
- * threads that may then access the same state, giving us a free hand
- * to reset state. However, we still need to let lockdep be aware that
- * we know this state may be accessed in hardirq context, so we
- * disable the irq around this manipulation and we want to keep
- * the spinlock focused on its duties and not accidentally conflate
- * coverage to the submission's irq state. (Similarly, although we
- * shouldn't need to disable irq around the manipulation of the
- * submission's irq state, we also wish to remind ourselves that
- * it is irq state.)
- */
- spin_lock_irqsave(&engine->active.lock, flags);
-
- __execlists_reset(engine, true);
-
- /* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->active.requests, sched.link)
- mark_eio(rq);
- intel_engine_signal_breadcrumbs(engine);
-
- /* Flush the queued requests to the timeline list (for retiring). */
- while ((rb = rb_first_cached(&execlists->queue))) {
- struct i915_priolist *p = to_priolist(rb);
- int i;
-
- priolist_for_each_request_consume(rq, rn, p, i) {
- mark_eio(rq);
- __i915_request_submit(rq);
- }
-
- rb_erase_cached(&p->node, &execlists->queue);
- i915_priolist_free(p);
- }
-
- /* On-hold requests will be flushed to timeline upon their release */
- list_for_each_entry(rq, &engine->active.hold, sched.link)
- mark_eio(rq);
-
- /* Cancel all attached virtual engines */
- while ((rb = rb_first_cached(&execlists->virtual))) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
-
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
-
- spin_lock(&ve->base.active.lock);
- rq = fetch_and_zero(&ve->request);
- if (rq) {
- mark_eio(rq);
-
- rq->engine = engine;
- __i915_request_submit(rq);
- i915_request_put(rq);
-
- ve->base.execlists.queue_priority_hint = INT_MIN;
- }
- spin_unlock(&ve->base.active.lock);
- }
-
- /* Remaining _unready_ requests will be nop'ed when submitted */
-
- execlists->queue_priority_hint = INT_MIN;
- execlists->queue = RB_ROOT_CACHED;
-
- GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
- execlists->tasklet.func = nop_submission_tasklet;
-
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void execlists_reset_finish(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
- /*
- * After a GPU reset, we may have requests to replay. Do so now while
- * we still have the forcewake to be sure that the GPU is not allowed
- * to sleep before we restart and reload a context.
- */
- GEM_BUG_ON(!reset_in_progress(execlists));
- if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
- execlists->tasklet.func(execlists->tasklet.data);
-
- if (__tasklet_enable(&execlists->tasklet))
- /* And kick in case we missed a new request submission. */
- tasklet_hi_schedule(&execlists->tasklet);
- ENGINE_TRACE(engine, "depth->%d\n",
- atomic_read(&execlists->tasklet.count));
-}
-
-static int gen8_emit_bb_start_noarb(struct i915_request *rq,
- u64 offset, u32 len,
- const unsigned int flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /*
- * WaDisableCtxRestoreArbitration:bdw,chv
- *
- * We don't need to perform MI_ARB_ENABLE as often as we do (in
- * particular all the gen that do not need the w/a at all!), if we
- * took care to make sure that on every switch into this context
- * (both ordinary and for preemption) that arbitration was enabled
- * we would be fine. However, for gen8 there is another w/a that
- * requires us to not preempt inside GPGPU execution, so we keep
- * arbitration disabled for gen8 batches. Arbitration will be
- * re-enabled before we close the request
- * (engine->emit_fini_breadcrumb).
- */
- *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-
- /* FIXME(BDW+): Address space and security selectors. */
- *cs++ = MI_BATCH_BUFFER_START_GEN8 |
- (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen8_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 len,
- const unsigned int flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
- *cs++ = MI_BATCH_BUFFER_START_GEN8 |
- (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
- *cs++ = MI_NOOP;
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR,
- ~(engine->irq_enable_mask | engine->irq_keep_mask));
- ENGINE_POSTING_READ(engine, RING_IMR);
-}
-
-static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
-}
-
-static int gen8_emit_flush(struct i915_request *request, u32 mode)
-{
- u32 cmd, *cs;
-
- cs = intel_ring_begin(request, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cmd = MI_FLUSH_DW + 1;
-
- /* We always require a command barrier so that subsequent
- * commands, such as breadcrumb interrupts, are strictly ordered
- * wrt the contents of the write cache being flushed to memory
- * (and thus being coherent from the CPU).
- */
- cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
-
- if (mode & EMIT_INVALIDATE) {
- cmd |= MI_INVALIDATE_TLB;
- if (request->engine->class == VIDEO_DECODE_CLASS)
- cmd |= MI_INVALIDATE_BSD;
- }
-
- *cs++ = cmd;
- *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
- *cs++ = 0; /* upper addr */
- *cs++ = 0; /* value */
- intel_ring_advance(request, cs);
-
- return 0;
-}
-
-static int gen8_emit_flush_render(struct i915_request *request,
- u32 mode)
-{
- bool vf_flush_wa = false, dc_flush_wa = false;
- u32 *cs, flags = 0;
- int len;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- if (mode & EMIT_FLUSH) {
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
- }
-
- if (mode & EMIT_INVALIDATE) {
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-
- /*
- * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
- * pipe control.
- */
- if (IS_GEN(request->engine->i915, 9))
- vf_flush_wa = true;
-
- /* WaForGAMHang:kbl */
- if (IS_KBL_GT_REVID(request->engine->i915, 0, KBL_REVID_B0))
- dc_flush_wa = true;
- }
-
- len = 6;
-
- if (vf_flush_wa)
- len += 6;
-
- if (dc_flush_wa)
- len += 12;
-
- cs = intel_ring_begin(request, len);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (vf_flush_wa)
- cs = gen8_emit_pipe_control(cs, 0, 0);
-
- if (dc_flush_wa)
- cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
- 0);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
-
- if (dc_flush_wa)
- cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
-
- intel_ring_advance(request, cs);
-
- return 0;
-}
-
-static int gen11_emit_flush_render(struct i915_request *request,
- u32 mode)
-{
- if (mode & EMIT_FLUSH) {
- u32 *cs;
- u32 flags = 0;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-
- cs = intel_ring_begin(request, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
- intel_ring_advance(request, cs);
- }
-
- if (mode & EMIT_INVALIDATE) {
- u32 *cs;
- u32 flags = 0;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-
- cs = intel_ring_begin(request, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
- intel_ring_advance(request, cs);
- }
-
- return 0;
-}
-
-static u32 preparser_disable(bool state)
-{
- return MI_ARB_CHECK | 1 << 8 | state;
-}
-
-static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine)
-{
- static const i915_reg_t vd[] = {
- GEN12_VD0_AUX_NV,
- GEN12_VD1_AUX_NV,
- GEN12_VD2_AUX_NV,
- GEN12_VD3_AUX_NV,
- };
-
- static const i915_reg_t ve[] = {
- GEN12_VE0_AUX_NV,
- GEN12_VE1_AUX_NV,
- };
-
- if (engine->class == VIDEO_DECODE_CLASS)
- return vd[engine->instance];
-
- if (engine->class == VIDEO_ENHANCEMENT_CLASS)
- return ve[engine->instance];
-
- GEM_BUG_ON("unknown aux_inv_reg\n");
-
- return INVALID_MMIO_REG;
-}
-
-static u32 *
-gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
-{
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(inv_reg);
- *cs++ = AUX_INV;
- *cs++ = MI_NOOP;
-
- return cs;
-}
-
-static int gen12_emit_flush_render(struct i915_request *request,
- u32 mode)
-{
- if (mode & EMIT_FLUSH) {
- u32 flags = 0;
- u32 *cs;
-
- flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
- flags |= PIPE_CONTROL_FLUSH_L3;
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- /* Wa_1409600907:tgl */
- flags |= PIPE_CONTROL_DEPTH_STALL;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
-
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
- flags |= PIPE_CONTROL_QW_WRITE;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- cs = intel_ring_begin(request, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cs = gen12_emit_pipe_control(cs,
- PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
- flags, LRC_PPHWSP_SCRATCH_ADDR);
- intel_ring_advance(request, cs);
- }
-
- if (mode & EMIT_INVALIDATE) {
- u32 flags = 0;
- u32 *cs;
-
- flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
-
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
- flags |= PIPE_CONTROL_QW_WRITE;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- cs = intel_ring_begin(request, 8 + 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /*
- * Prevent the pre-parser from skipping past the TLB
- * invalidate and loading a stale page for the batch
- * buffer / request payload.
- */
- *cs++ = preparser_disable(true);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
-
- /* hsdes: 1809175790 */
- cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs);
-
- *cs++ = preparser_disable(false);
- intel_ring_advance(request, cs);
- }
-
- return 0;
-}
-
-static int gen12_emit_flush(struct i915_request *request, u32 mode)
-{
- intel_engine_mask_t aux_inv = 0;
- u32 cmd, *cs;
-
- cmd = 4;
- if (mode & EMIT_INVALIDATE)
- cmd += 2;
- if (mode & EMIT_INVALIDATE)
- aux_inv = request->engine->mask & ~BIT(BCS0);
- if (aux_inv)
- cmd += 2 * hweight8(aux_inv) + 2;
-
- cs = intel_ring_begin(request, cmd);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (mode & EMIT_INVALIDATE)
- *cs++ = preparser_disable(true);
-
- cmd = MI_FLUSH_DW + 1;
-
- /* We always require a command barrier so that subsequent
- * commands, such as breadcrumb interrupts, are strictly ordered
- * wrt the contents of the write cache being flushed to memory
- * (and thus being coherent from the CPU).
- */
- cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
-
- if (mode & EMIT_INVALIDATE) {
- cmd |= MI_INVALIDATE_TLB;
- if (request->engine->class == VIDEO_DECODE_CLASS)
- cmd |= MI_INVALIDATE_BSD;
- }
-
- *cs++ = cmd;
- *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
- *cs++ = 0; /* upper addr */
- *cs++ = 0; /* value */
-
- if (aux_inv) { /* hsdes: 1809175790 */
- struct intel_engine_cs *engine;
- unsigned int tmp;
-
- *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv));
- for_each_engine_masked(engine, request->engine->gt,
- aux_inv, tmp) {
- *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
- *cs++ = AUX_INV;
- }
- *cs++ = MI_NOOP;
- }
-
- if (mode & EMIT_INVALIDATE)
- *cs++ = preparser_disable(false);
-
- intel_ring_advance(request, cs);
-
- return 0;
-}
-
-static void assert_request_valid(struct i915_request *rq)
-{
- struct intel_ring *ring __maybe_unused = rq->ring;
-
- /* Can we unwind this request without appearing to go forwards? */
- GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
-}
-
-/*
- * Reserve space for 2 NOOPs at the end of each request to be
- * used as a workaround for not being allowed to do lite
- * restore with HEAD==TAIL (WaIdleLiteRestore).
- */
-static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
-{
- /* Ensure there's always at least one preemption point per-request. */
- *cs++ = MI_ARB_CHECK;
- *cs++ = MI_NOOP;
- request->wa_tail = intel_ring_offset(request, cs);
-
- /* Check that entire request is less than half the ring */
- assert_request_valid(request);
-
- return cs;
-}
-
-static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
-{
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = intel_hws_preempt_address(request->engine);
- *cs++ = 0;
-
- return cs;
-}
-
-static __always_inline u32*
-gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
-{
- *cs++ = MI_USER_INTERRUPT;
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- if (intel_engine_has_semaphores(request->engine))
- cs = emit_preempt_busywait(request, cs);
-
- request->tail = intel_ring_offset(request, cs);
- assert_ring_tail_valid(request->ring, request->tail);
-
- return gen8_emit_wa_tail(request, cs);
-}
-
-static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
-}
-
-static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
-}
-
-static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
-{
- cs = gen8_emit_pipe_control(cs,
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE,
- 0);
-
- /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
- cs = gen8_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- hwsp_offset(request),
- PIPE_CONTROL_FLUSH_ENABLE |
- PIPE_CONTROL_CS_STALL);
-
- return gen8_emit_fini_breadcrumb_tail(request, cs);
-}
-
-static u32 *
-gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
-{
- cs = gen8_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- hwsp_offset(request),
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_TILE_CACHE_FLUSH |
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_FLUSH_ENABLE);
-
- return gen8_emit_fini_breadcrumb_tail(request, cs);
-}
-
-/*
- * Note that the CS instruction pre-parser will not stall on the breadcrumb
- * flush and will continue pre-fetching the instructions after it before the
- * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
- * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
- * of the next request before the memory has been flushed, we're guaranteed that
- * we won't access the batch itself too early.
- * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
- * so, if the current request is modifying an instruction in the next request on
- * the same intel_context, we might pre-fetch and then execute the pre-update
- * instruction. To avoid this, the users of self-modifying code should either
- * disable the parser around the code emitting the memory writes, via a new flag
- * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
- * the in-kernel use-cases we've opted to use a separate context, see
- * reloc_gpu() as an example.
- * All the above applies only to the instructions themselves. Non-inline data
- * used by the instructions is not pre-fetched.
- */
-
-static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs)
-{
- *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = intel_hws_preempt_address(request->engine);
- *cs++ = 0;
- *cs++ = 0;
- *cs++ = MI_NOOP;
-
- return cs;
-}
-
-static __always_inline u32*
-gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
-{
- *cs++ = MI_USER_INTERRUPT;
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- if (intel_engine_has_semaphores(request->engine))
- cs = gen12_emit_preempt_busywait(request, cs);
-
- request->tail = intel_ring_offset(request, cs);
- assert_ring_tail_valid(request->ring, request->tail);
-
- return gen8_emit_wa_tail(request, cs);
-}
-
-static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- /* XXX Stalling flush before seqno write; post-sync not */
- cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
- return gen12_emit_fini_breadcrumb_tail(rq, cs);
-}
-
-static u32 *
-gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
-{
- cs = gen12_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- hwsp_offset(request),
- PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_TILE_CACHE_FLUSH |
- PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- /* Wa_1409600907:tgl */
- PIPE_CONTROL_DEPTH_STALL |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_FLUSH_ENABLE);
-
- return gen12_emit_fini_breadcrumb_tail(request, cs);
-}
-
-static void execlists_park(struct intel_engine_cs *engine)
-{
- cancel_timer(&engine->execlists.timer);
- cancel_timer(&engine->execlists.preempt);
-}
-
-void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
-{
- engine->submit_request = execlists_submit_request;
- engine->schedule = i915_schedule;
- engine->execlists.tasklet.func = execlists_submission_tasklet;
-
- engine->reset.prepare = execlists_reset_prepare;
- engine->reset.rewind = execlists_reset_rewind;
- engine->reset.cancel = execlists_reset_cancel;
- engine->reset.finish = execlists_reset_finish;
-
- engine->park = execlists_park;
- engine->unpark = NULL;
-
- engine->flags |= I915_ENGINE_SUPPORTS_STATS;
- if (!intel_vgpu_active(engine->i915)) {
- engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
- if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
- engine->flags |= I915_ENGINE_HAS_PREEMPTION;
- if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- engine->flags |= I915_ENGINE_HAS_TIMESLICES;
- }
- }
-
- if (INTEL_GEN(engine->i915) >= 12)
- engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
-
- if (intel_engine_has_preemption(engine))
- engine->emit_bb_start = gen8_emit_bb_start;
- else
- engine->emit_bb_start = gen8_emit_bb_start_noarb;
-}
-
-static void execlists_shutdown(struct intel_engine_cs *engine)
-{
- /* Synchronise with residual timers and any softirq they raise */
- del_timer_sync(&engine->execlists.timer);
- del_timer_sync(&engine->execlists.preempt);
- tasklet_kill(&engine->execlists.tasklet);
-}
-
-static void execlists_release(struct intel_engine_cs *engine)
-{
- engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
-
- execlists_shutdown(engine);
-
- intel_engine_cleanup_common(engine);
- lrc_destroy_wa_ctx(engine);
-}
-
-static void
-logical_ring_default_vfuncs(struct intel_engine_cs *engine)
-{
- /* Default vfuncs which can be overridden by each engine. */
-
- engine->resume = execlists_resume;
-
- engine->cops = &execlists_context_ops;
- engine->request_alloc = execlists_request_alloc;
-
- engine->emit_flush = gen8_emit_flush;
- engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
- engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
- if (INTEL_GEN(engine->i915) >= 12) {
- engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb;
- engine->emit_flush = gen12_emit_flush;
- }
- engine->set_default_submission = intel_execlists_set_default_submission;
-
- if (INTEL_GEN(engine->i915) < 11) {
- engine->irq_enable = gen8_logical_ring_enable_irq;
- engine->irq_disable = gen8_logical_ring_disable_irq;
- } else {
- /*
- * TODO: On Gen11 interrupt masks need to be clear
- * to allow C6 entry. Keep interrupts enabled
- * and take the hit of generating extra interrupts
- * until a more refined solution exists.
- */
- }
-}
-
-static inline void
-logical_ring_default_irqs(struct intel_engine_cs *engine)
-{
- unsigned int shift = 0;
-
- if (INTEL_GEN(engine->i915) < 11) {
- const u8 irq_shifts[] = {
- [RCS0] = GEN8_RCS_IRQ_SHIFT,
- [BCS0] = GEN8_BCS_IRQ_SHIFT,
- [VCS0] = GEN8_VCS0_IRQ_SHIFT,
- [VCS1] = GEN8_VCS1_IRQ_SHIFT,
- [VECS0] = GEN8_VECS_IRQ_SHIFT,
- };
-
- shift = irq_shifts[engine->id];
- }
-
- engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
- engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
- engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
- engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
-}
-
-static void rcs_submission_override(struct intel_engine_cs *engine)
-{
- switch (INTEL_GEN(engine->i915)) {
- case 12:
- engine->emit_flush = gen12_emit_flush_render;
- engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
- break;
- case 11:
- engine->emit_flush = gen11_emit_flush_render;
- engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
- break;
- default:
- engine->emit_flush = gen8_emit_flush_render;
- engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
- break;
- }
-}
-
-int intel_execlists_submission_setup(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct drm_i915_private *i915 = engine->i915;
- struct intel_uncore *uncore = engine->uncore;
- u32 base = engine->mmio_base;
-
- tasklet_init(&engine->execlists.tasklet,
- execlists_submission_tasklet, (unsigned long)engine);
- timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
- timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
-
- logical_ring_default_vfuncs(engine);
- logical_ring_default_irqs(engine);
-
- if (engine->class == RENDER_CLASS)
- rcs_submission_override(engine);
-
- if (intel_init_workaround_bb(engine))
- /*
- * We continue even if we fail to initialize the WA batch
- * because we only expect rare glitches, nothing
- * critical enough to prevent us from using the GPU
- */
- drm_err(&i915->drm, "WA batch buffer initialization failed\n");
-
- if (HAS_LOGICAL_RING_ELSQ(i915)) {
- execlists->submit_reg = uncore->regs +
- i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
- execlists->ctrl_reg = uncore->regs +
- i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
- } else {
- execlists->submit_reg = uncore->regs +
- i915_mmio_reg_offset(RING_ELSP(base));
- }
-
- execlists->csb_status =
- (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
-
- execlists->csb_write =
- &engine->status_page.addr[intel_hws_csb_write_index(i915)];
-
- if (INTEL_GEN(i915) < 11)
- execlists->csb_size = GEN8_CSB_ENTRIES;
- else
- execlists->csb_size = GEN11_CSB_ENTRIES;
-
- if (INTEL_GEN(engine->i915) >= 11) {
- execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
- execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
- }
-
- /* Finally, take ownership and responsibility for cleanup! */
- engine->sanitize = execlists_sanitize;
- engine->release = execlists_release;
-
- return 0;
-}
-
-static void init_common_reg_state(u32 * const regs,
- const struct intel_engine_cs *engine,
- const struct intel_ring *ring,
- bool inhibit)
-{
- u32 ctl;
-
- ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
- ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- if (inhibit)
- ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
- if (INTEL_GEN(engine->i915) < 11)
- ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
- CTX_CTRL_RS_CTX_ENABLE);
- regs[CTX_CONTEXT_CONTROL] = ctl;
-
- regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
- regs[CTX_TIMESTAMP] = 0;
-}
-
-static void init_wa_bb_reg_state(u32 * const regs,
- const struct intel_engine_cs *engine)
-{
- const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
-
- if (wa_ctx->per_ctx.size) {
- const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
-
- GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
- regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
- (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
- }
-
- if (wa_ctx->indirect_ctx.size) {
- lrc_ring_setup_indirect_ctx(regs, engine,
- i915_ggtt_offset(wa_ctx->vma) +
- wa_ctx->indirect_ctx.offset,
- wa_ctx->indirect_ctx.size);
- }
-}
-
-static void init_ppgtt_reg_state(u32 *regs, const struct i915_ppgtt *ppgtt)
-{
- if (i915_vm_is_4lvl(&ppgtt->vm)) {
- /* 64b PPGTT (48bit canonical)
- * PDP0_DESCRIPTOR contains the base address to PML4 and
- * other PDP Descriptors are ignored.
- */
- ASSIGN_CTX_PML4(ppgtt, regs);
- } else {
- ASSIGN_CTX_PDP(ppgtt, regs, 3);
- ASSIGN_CTX_PDP(ppgtt, regs, 2);
- ASSIGN_CTX_PDP(ppgtt, regs, 1);
- ASSIGN_CTX_PDP(ppgtt, regs, 0);
- }
-}
-
-static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
-{
- if (i915_is_ggtt(vm))
- return i915_vm_to_ggtt(vm)->alias;
- else
- return i915_vm_to_ppgtt(vm);
-}
-
-static void execlists_init_reg_state(u32 *regs,
- const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- const struct intel_ring *ring,
- bool inhibit)
-{
- /*
- * A context is actually a big batch buffer with several
- * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
- * values we are setting here are only for the first context restore:
- * on a subsequent save, the GPU will recreate this batchbuffer with new
- * values (including all the missing MI_LOAD_REGISTER_IMM commands that
- * we are not initializing here).
- *
- * Must keep consistent with virtual_update_register_offsets().
- */
- set_offsets(regs, reg_offsets(engine), engine, inhibit);
-
- init_common_reg_state(regs, engine, ring, inhibit);
- init_ppgtt_reg_state(regs, vm_alias(ce->vm));
-
- init_wa_bb_reg_state(regs, engine);
-
- __reset_stop_ring(regs, engine);
-}
-
-static int
-populate_lr_context(struct intel_context *ce,
- struct drm_i915_gem_object *ctx_obj,
- struct intel_engine_cs *engine,
- struct intel_ring *ring)
-{
- bool inhibit = true;
- void *vaddr;
-
- vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- drm_dbg(&engine->i915->drm, "Could not map object pages!\n");
- return PTR_ERR(vaddr);
- }
-
- set_redzone(vaddr, engine);
-
- if (engine->default_state) {
- shmem_read(engine->default_state, 0,
- vaddr, engine->context_size);
- __set_bit(CONTEXT_VALID_BIT, &ce->flags);
- inhibit = false;
- }
-
- /* Clear the ppHWSP (inc. per-context counters) */
- memset(vaddr, 0, PAGE_SIZE);
-
- /*
- * The second page of the context object contains some registers which
- * must be set up prior to the first execution.
- */
- execlists_init_reg_state(vaddr + LRC_STATE_OFFSET,
- ce, engine, ring, inhibit);
-
- __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
- i915_gem_object_unpin_map(ctx_obj);
- return 0;
-}
-
-static struct intel_timeline *pinned_timeline(struct intel_context *ce)
-{
- struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
-
- return intel_timeline_create_from_engine(ce->engine,
- page_unmask_bits(tl));
-}
-
-static int __execlists_context_alloc(struct intel_context *ce,
- struct intel_engine_cs *engine)
-{
- struct drm_i915_gem_object *ctx_obj;
- struct intel_ring *ring;
- struct i915_vma *vma;
- u32 context_size;
- int ret;
-
- GEM_BUG_ON(ce->state);
- context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
-
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- context_size += I915_GTT_PAGE_SIZE; /* for redzone */
-
- if (INTEL_GEN(engine->i915) == 12) {
- ce->wa_bb_page = context_size / PAGE_SIZE;
- context_size += PAGE_SIZE;
- }
-
- ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size);
- if (IS_ERR(ctx_obj))
- return PTR_ERR(ctx_obj);
-
- vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto error_deref_obj;
- }
-
- if (!page_mask_bits(ce->timeline)) {
- struct intel_timeline *tl;
-
- /*
- * Use the static global HWSP for the kernel context, and
- * a dynamically allocated cacheline for everyone else.
- */
- if (unlikely(ce->timeline))
- tl = pinned_timeline(ce);
- else
- tl = intel_timeline_create(engine->gt);
- if (IS_ERR(tl)) {
- ret = PTR_ERR(tl);
- goto error_deref_obj;
- }
-
- ce->timeline = tl;
- }
-
- ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- goto error_deref_obj;
- }
-
- ret = populate_lr_context(ce, ctx_obj, engine, ring);
- if (ret) {
- drm_dbg(&engine->i915->drm,
- "Failed to populate LRC: %d\n", ret);
- goto error_ring_free;
- }
-
- ce->ring = ring;
- ce->state = vma;
-
- return 0;
-
-error_ring_free:
- intel_ring_put(ring);
-error_deref_obj:
- i915_gem_object_put(ctx_obj);
- return ret;
-}
-
-static struct list_head *virtual_queue(struct virtual_engine *ve)
-{
- return &ve->base.execlists.default_priolist.requests[0];
-}
-
-static void rcu_virtual_context_destroy(struct work_struct *wrk)
-{
- struct virtual_engine *ve =
- container_of(wrk, typeof(*ve), rcu.work);
- unsigned int n;
-
- GEM_BUG_ON(ve->context.inflight);
-
- /* Preempt-to-busy may leave a stale request behind. */
- if (unlikely(ve->request)) {
- struct i915_request *old;
-
- spin_lock_irq(&ve->base.active.lock);
-
- old = fetch_and_zero(&ve->request);
- if (old) {
- GEM_BUG_ON(!i915_request_completed(old));
- __i915_request_submit(old);
- i915_request_put(old);
- }
-
- spin_unlock_irq(&ve->base.active.lock);
- }
-
- /*
- * Flush the tasklet in case it is still running on another core.
- *
- * This needs to be done before we remove ourselves from the siblings'
- * rbtrees as in the case it is running in parallel, it may reinsert
- * the rb_node into a sibling.
- */
- tasklet_kill(&ve->base.execlists.tasklet);
-
- /* Decouple ourselves from the siblings, no more access allowed. */
- for (n = 0; n < ve->num_siblings; n++) {
- struct intel_engine_cs *sibling = ve->siblings[n];
- struct rb_node *node = &ve->nodes[sibling->id].rb;
-
- if (RB_EMPTY_NODE(node))
- continue;
-
- spin_lock_irq(&sibling->active.lock);
-
- /* Detachment is lazily performed in the execlists tasklet */
- if (!RB_EMPTY_NODE(node))
- rb_erase_cached(node, &sibling->execlists.virtual);
-
- spin_unlock_irq(&sibling->active.lock);
- }
- GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
- GEM_BUG_ON(!list_empty(virtual_queue(ve)));
-
- if (ve->context.state)
- __execlists_context_fini(&ve->context);
- intel_context_fini(&ve->context);
-
- intel_breadcrumbs_free(ve->base.breadcrumbs);
- intel_engine_free_request_pool(&ve->base);
-
- kfree(ve->bonds);
- kfree(ve);
-}
-
-static void virtual_context_destroy(struct kref *kref)
-{
- struct virtual_engine *ve =
- container_of(kref, typeof(*ve), context.ref);
-
- GEM_BUG_ON(!list_empty(&ve->context.signals));
-
- /*
- * When destroying the virtual engine, we have to be aware that
- * it may still be in use from a hardirq/softirq context causing
- * the resubmission of a completed request (background completion
- * due to preempt-to-busy). Before we can free the engine, we need
- * to flush the submission code and tasklets that are still potentially
- * accessing the engine. Flushing the tasklets requires process context,
- * and since we can guard the resubmit onto the engine with an RCU read
- * lock, we can delegate the free of the engine to an RCU worker.
- */
- INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
- queue_rcu_work(system_wq, &ve->rcu);
-}
-
-static void virtual_engine_initial_hint(struct virtual_engine *ve)
-{
- int swp;
-
- /*
- * Pick a random sibling on starting to help spread the load around.
- *
- * New contexts are typically created with exactly the same order
- * of siblings, and often started in batches. Due to the way we iterate
- * the array of siblings when submitting requests, sibling[0] is
- * prioritised for dequeuing. If we make sure that sibling[0] is fairly
- * randomised across the system, we also help spread the load by the
- * first engine we inspect being different each time.
- *
- * NB This does not force us to execute on this engine, it will just
- * typically be the first we inspect for submission.
- */
- swp = prandom_u32_max(ve->num_siblings);
- if (swp)
- swap(ve->siblings[swp], ve->siblings[0]);
-}
-
-static int virtual_context_alloc(struct intel_context *ce)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
-
- return __execlists_context_alloc(ce, ve->siblings[0]);
-}
-
-static int virtual_context_pin(struct intel_context *ce, void *vaddr)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
-
- /* Note: we must use a real engine class for setting up reg state */
- return __execlists_context_pin(ce, ve->siblings[0], vaddr);
-}
-
-static void virtual_context_enter(struct intel_context *ce)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- unsigned int n;
-
- for (n = 0; n < ve->num_siblings; n++)
- intel_engine_pm_get(ve->siblings[n]);
-
- intel_timeline_enter(ce->timeline);
-}
-
-static void virtual_context_exit(struct intel_context *ce)
+static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- unsigned int n;
-
- intel_timeline_exit(ce->timeline);
-
- for (n = 0; n < ve->num_siblings; n++)
- intel_engine_pm_put(ve->siblings[n]);
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ ce->runtime.num_underflow++;
+ ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
+#endif
}
-static const struct intel_context_ops virtual_context_ops = {
- .alloc = virtual_context_alloc,
-
- .pre_pin = execlists_context_pre_pin,
- .pin = virtual_context_pin,
- .unpin = execlists_context_unpin,
- .post_unpin = execlists_context_post_unpin,
-
- .enter = virtual_context_enter,
- .exit = virtual_context_exit,
-
- .destroy = virtual_context_destroy,
-};
-
-static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
+void lrc_update_runtime(struct intel_context *ce)
{
- struct i915_request *rq;
- intel_engine_mask_t mask;
-
- rq = READ_ONCE(ve->request);
- if (!rq)
- return 0;
-
- /* The rq is ready for submission; rq->execution_mask is now stable. */
- mask = rq->execution_mask;
- if (unlikely(!mask)) {
- /* Invalid selection, submit to a random engine in error */
- i915_request_set_error_once(rq, -ENODEV);
- mask = ve->siblings[0]->mask;
- }
-
- ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
- rq->fence.context, rq->fence.seqno,
- mask, ve->base.execlists.queue_priority_hint);
-
- return mask;
-}
+ u32 old;
+ s32 dt;
-static void virtual_submission_tasklet(unsigned long data)
-{
- struct virtual_engine * const ve = (struct virtual_engine *)data;
- const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
- intel_engine_mask_t mask;
- unsigned int n;
-
- rcu_read_lock();
- mask = virtual_submission_mask(ve);
- rcu_read_unlock();
- if (unlikely(!mask))
+ if (intel_context_is_barrier(ce))
return;
- local_irq_disable();
- for (n = 0; n < ve->num_siblings; n++) {
- struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
- struct ve_node * const node = &ve->nodes[sibling->id];
- struct rb_node **parent, *rb;
- bool first;
-
- if (!READ_ONCE(ve->request))
- break; /* already handled by a sibling's tasklet */
-
- if (unlikely(!(mask & sibling->mask))) {
- if (!RB_EMPTY_NODE(&node->rb)) {
- spin_lock(&sibling->active.lock);
- rb_erase_cached(&node->rb,
- &sibling->execlists.virtual);
- RB_CLEAR_NODE(&node->rb);
- spin_unlock(&sibling->active.lock);
- }
- continue;
- }
-
- spin_lock(&sibling->active.lock);
-
- if (!RB_EMPTY_NODE(&node->rb)) {
- /*
- * Cheat and avoid rebalancing the tree if we can
- * reuse this node in situ.
- */
- first = rb_first_cached(&sibling->execlists.virtual) ==
- &node->rb;
- if (prio == node->prio || (prio > node->prio && first))
- goto submit_engine;
-
- rb_erase_cached(&node->rb, &sibling->execlists.virtual);
- }
-
- rb = NULL;
- first = true;
- parent = &sibling->execlists.virtual.rb_root.rb_node;
- while (*parent) {
- struct ve_node *other;
-
- rb = *parent;
- other = rb_entry(rb, typeof(*other), rb);
- if (prio > other->prio) {
- parent = &rb->rb_left;
- } else {
- parent = &rb->rb_right;
- first = false;
- }
- }
-
- rb_link_node(&node->rb, rb, parent);
- rb_insert_color_cached(&node->rb,
- &sibling->execlists.virtual,
- first);
-
-submit_engine:
- GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
- node->prio = prio;
- if (first && prio > sibling->execlists.queue_priority_hint)
- tasklet_hi_schedule(&sibling->execlists.tasklet);
-
- spin_unlock(&sibling->active.lock);
- }
- local_irq_enable();
-}
-
-static void virtual_submit_request(struct i915_request *rq)
-{
- struct virtual_engine *ve = to_virtual_engine(rq->engine);
- struct i915_request *old;
- unsigned long flags;
-
- ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
- rq->fence.context,
- rq->fence.seqno);
-
- GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
-
- spin_lock_irqsave(&ve->base.active.lock, flags);
-
- old = ve->request;
- if (old) { /* background completion event from preempt-to-busy */
- GEM_BUG_ON(!i915_request_completed(old));
- __i915_request_submit(old);
- i915_request_put(old);
- }
-
- if (i915_request_completed(rq)) {
- __i915_request_submit(rq);
-
- ve->base.execlists.queue_priority_hint = INT_MIN;
- ve->request = NULL;
- } else {
- ve->base.execlists.queue_priority_hint = rq_prio(rq);
- ve->request = i915_request_get(rq);
-
- GEM_BUG_ON(!list_empty(virtual_queue(ve)));
- list_move_tail(&rq->sched.link, virtual_queue(ve));
-
- tasklet_hi_schedule(&ve->base.execlists.tasklet);
- }
-
- spin_unlock_irqrestore(&ve->base.active.lock, flags);
-}
-
-static struct ve_bond *
-virtual_find_bond(struct virtual_engine *ve,
- const struct intel_engine_cs *master)
-{
- int i;
-
- for (i = 0; i < ve->num_bonds; i++) {
- if (ve->bonds[i].master == master)
- return &ve->bonds[i];
- }
-
- return NULL;
-}
-
-static void
-virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
-{
- struct virtual_engine *ve = to_virtual_engine(rq->engine);
- intel_engine_mask_t allowed, exec;
- struct ve_bond *bond;
-
- allowed = ~to_request(signal)->engine->mask;
-
- bond = virtual_find_bond(ve, to_request(signal)->engine);
- if (bond)
- allowed &= bond->sibling_mask;
-
- /* Restrict the bonded request to run on only the available engines */
- exec = READ_ONCE(rq->execution_mask);
- while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
- ;
-
- /* Prevent the master from being re-run on the bonded engines */
- to_request(signal)->execution_mask &= ~allowed;
-}
-
-struct intel_context *
-intel_execlists_create_virtual(struct intel_engine_cs **siblings,
- unsigned int count)
-{
- struct virtual_engine *ve;
- unsigned int n;
- int err;
-
- if (count == 0)
- return ERR_PTR(-EINVAL);
-
- if (count == 1)
- return intel_context_create(siblings[0]);
-
- ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
- if (!ve)
- return ERR_PTR(-ENOMEM);
-
- ve->base.i915 = siblings[0]->i915;
- ve->base.gt = siblings[0]->gt;
- ve->base.uncore = siblings[0]->uncore;
- ve->base.id = -1;
-
- ve->base.class = OTHER_CLASS;
- ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
- ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
- ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
-
- /*
- * The decision on whether to submit a request using semaphores
- * depends on the saturated state of the engine. We only compute
- * this during HW submission of the request, and we need for this
- * state to be globally applied to all requests being submitted
- * to this engine. Virtual engines encompass more than one physical
- * engine and so we cannot accurately tell in advance if one of those
- * engines is already saturated and so cannot afford to use a semaphore
- * and be pessimized in priority for doing so -- if we are the only
- * context using semaphores after all other clients have stopped, we
- * will be starved on the saturated system. Such a global switch for
- * semaphores is less than ideal, but alas is the current compromise.
- */
- ve->base.saturated = ALL_ENGINES;
-
- snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
-
- intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
- intel_engine_init_execlists(&ve->base);
-
- ve->base.cops = &virtual_context_ops;
- ve->base.request_alloc = execlists_request_alloc;
-
- ve->base.schedule = i915_schedule;
- ve->base.submit_request = virtual_submit_request;
- ve->base.bond_execute = virtual_bond_execute;
-
- INIT_LIST_HEAD(virtual_queue(ve));
- ve->base.execlists.queue_priority_hint = INT_MIN;
- tasklet_init(&ve->base.execlists.tasklet,
- virtual_submission_tasklet,
- (unsigned long)ve);
-
- intel_context_init(&ve->context, &ve->base);
-
- ve->base.breadcrumbs = intel_breadcrumbs_create(NULL);
- if (!ve->base.breadcrumbs) {
- err = -ENOMEM;
- goto err_put;
- }
-
- for (n = 0; n < count; n++) {
- struct intel_engine_cs *sibling = siblings[n];
-
- GEM_BUG_ON(!is_power_of_2(sibling->mask));
- if (sibling->mask & ve->base.mask) {
- DRM_DEBUG("duplicate %s entry in load balancer\n",
- sibling->name);
- err = -EINVAL;
- goto err_put;
- }
-
- /*
- * The virtual engine implementation is tightly coupled to
- * the execlists backend -- we push out requests directly
- * into a tree inside each physical engine. We could support
- * layering if we handle cloning of the requests and
- * submitting a copy into each backend.
- */
- if (sibling->execlists.tasklet.func !=
- execlists_submission_tasklet) {
- err = -ENODEV;
- goto err_put;
- }
-
- GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
- RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
-
- ve->siblings[ve->num_siblings++] = sibling;
- ve->base.mask |= sibling->mask;
-
- /*
- * All physical engines must be compatible for their emission
- * functions (as we build the instructions during request
- * construction and do not alter them before submission
- * on the physical engine). We use the engine class as a guide
- * here, although that could be refined.
- */
- if (ve->base.class != OTHER_CLASS) {
- if (ve->base.class != sibling->class) {
- DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
- sibling->class, ve->base.class);
- err = -EINVAL;
- goto err_put;
- }
- continue;
- }
-
- ve->base.class = sibling->class;
- ve->base.uabi_class = sibling->uabi_class;
- snprintf(ve->base.name, sizeof(ve->base.name),
- "v%dx%d", ve->base.class, count);
- ve->base.context_size = sibling->context_size;
-
- ve->base.emit_bb_start = sibling->emit_bb_start;
- ve->base.emit_flush = sibling->emit_flush;
- ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
- ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
- ve->base.emit_fini_breadcrumb_dw =
- sibling->emit_fini_breadcrumb_dw;
-
- ve->base.flags = sibling->flags;
- }
-
- ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
-
- virtual_engine_initial_hint(ve);
- return &ve->context;
-
-err_put:
- intel_context_put(&ve->context);
- return ERR_PTR(err);
-}
-
-struct intel_context *
-intel_execlists_clone_virtual(struct intel_engine_cs *src)
-{
- struct virtual_engine *se = to_virtual_engine(src);
- struct intel_context *dst;
-
- dst = intel_execlists_create_virtual(se->siblings,
- se->num_siblings);
- if (IS_ERR(dst))
- return dst;
-
- if (se->num_bonds) {
- struct virtual_engine *de = to_virtual_engine(dst->engine);
-
- de->bonds = kmemdup(se->bonds,
- sizeof(*se->bonds) * se->num_bonds,
- GFP_KERNEL);
- if (!de->bonds) {
- intel_context_put(dst);
- return ERR_PTR(-ENOMEM);
- }
-
- de->num_bonds = se->num_bonds;
- }
-
- return dst;
-}
-
-int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
- const struct intel_engine_cs *master,
- const struct intel_engine_cs *sibling)
-{
- struct virtual_engine *ve = to_virtual_engine(engine);
- struct ve_bond *bond;
- int n;
-
- /* Sanity check the sibling is part of the virtual engine */
- for (n = 0; n < ve->num_siblings; n++)
- if (sibling == ve->siblings[n])
- break;
- if (n == ve->num_siblings)
- return -EINVAL;
-
- bond = virtual_find_bond(ve, master);
- if (bond) {
- bond->sibling_mask |= sibling->mask;
- return 0;
- }
-
- bond = krealloc(ve->bonds,
- sizeof(*bond) * (ve->num_bonds + 1),
- GFP_KERNEL);
- if (!bond)
- return -ENOMEM;
-
- bond[ve->num_bonds].master = master;
- bond[ve->num_bonds].sibling_mask = sibling->mask;
-
- ve->bonds = bond;
- ve->num_bonds++;
-
- return 0;
-}
-
-void intel_execlists_show_requests(struct intel_engine_cs *engine,
- struct drm_printer *m,
- void (*show_request)(struct drm_printer *m,
- struct i915_request *rq,
- const char *prefix),
- unsigned int max)
-{
- const struct intel_engine_execlists *execlists = &engine->execlists;
- struct i915_request *rq, *last;
- unsigned long flags;
- unsigned int count;
- struct rb_node *rb;
-
- spin_lock_irqsave(&engine->active.lock, flags);
-
- last = NULL;
- count = 0;
- list_for_each_entry(rq, &engine->active.requests, sched.link) {
- if (count++ < max - 1)
- show_request(m, rq, "\t\tE ");
- else
- last = rq;
- }
- if (last) {
- if (count > max) {
- drm_printf(m,
- "\t\t...skipping %d executing requests...\n",
- count - max);
- }
- show_request(m, last, "\t\tE ");
- }
-
- if (execlists->switch_priority_hint != INT_MIN)
- drm_printf(m, "\t\tSwitch priority hint: %d\n",
- READ_ONCE(execlists->switch_priority_hint));
- if (execlists->queue_priority_hint != INT_MIN)
- drm_printf(m, "\t\tQueue priority hint: %d\n",
- READ_ONCE(execlists->queue_priority_hint));
-
- last = NULL;
- count = 0;
- for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
- struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
- int i;
-
- priolist_for_each_request(rq, p, i) {
- if (count++ < max - 1)
- show_request(m, rq, "\t\tQ ");
- else
- last = rq;
- }
- }
- if (last) {
- if (count > max) {
- drm_printf(m,
- "\t\t...skipping %d queued requests...\n",
- count - max);
- }
- show_request(m, last, "\t\tQ ");
- }
+ old = ce->runtime.last;
+ ce->runtime.last = lrc_get_runtime(ce);
+ dt = ce->runtime.last - old;
- last = NULL;
- count = 0;
- for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- struct i915_request *rq = READ_ONCE(ve->request);
-
- if (rq) {
- if (count++ < max - 1)
- show_request(m, rq, "\t\tV ");
- else
- last = rq;
- }
- }
- if (last) {
- if (count > max) {
- drm_printf(m,
- "\t\t...skipping %d virtual requests...\n",
- count - max);
- }
- show_request(m, last, "\t\tV ");
+ if (unlikely(dt < 0)) {
+ CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
+ old, ce->runtime.last, dt);
+ st_update_runtime_underflow(ce, dt);
+ return;
}
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-void intel_lr_context_reset(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 head,
- bool scrub)
-{
- GEM_BUG_ON(!intel_context_is_pinned(ce));
-
- /*
- * We want a simple context + ring to execute the breadcrumb update.
- * We cannot rely on the context being intact across the GPU hang,
- * so clear it and rebuild just what we need for the breadcrumb.
- * All pending requests for this context will be zapped, and any
- * future request will be after userspace has had the opportunity
- * to recreate its own state.
- */
- if (scrub)
- restore_default_state(ce, engine);
-
- /* Rerun the request; its payload has been neutered (if guilty). */
- __execlists_update_reg_state(ce, engine, head);
-}
-
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
-{
- return engine->set_default_submission ==
- intel_execlists_set_default_submission;
+ ewma_runtime_add(&ce->runtime.avg, dt);
+ ce->runtime.total += dt;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index c2d287f25497..7f697845c4cf 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -1,90 +1,20 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
*/
-#ifndef _INTEL_LRC_H_
-#define _INTEL_LRC_H_
+#ifndef __INTEL_LRC_H__
+#define __INTEL_LRC_H__
#include <linux/types.h>
-struct drm_printer;
+#include "intel_context.h"
+#include "intel_lrc_reg.h"
-struct drm_i915_private;
-struct i915_gem_context;
-struct i915_request;
-struct intel_context;
+struct drm_i915_gem_object;
struct intel_engine_cs;
+struct intel_ring;
-/* Execlists regs */
-#define RING_ELSP(base) _MMIO((base) + 0x230)
-#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234)
-#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244)
-#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
-#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
-#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
-#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2)
-#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE (1 << 8)
-#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
-#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
-#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
-
-#define EL_CTRL_LOAD (1 << 0)
-
-/* The docs specify that the write pointer wraps around after 5h, "After status
- * is written out to the last available status QW at offset 5h, this pointer
- * wraps to 0."
- *
- * Therefore, one must infer that even though there are 3 bits available, 6 and
- * 7 appear to be reserved.
- */
-#define GEN8_CSB_ENTRIES 6
-#define GEN8_CSB_PTR_MASK 0x7
-#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
-#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
-
-#define GEN11_CSB_ENTRIES 12
-#define GEN11_CSB_PTR_MASK 0xf
-#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
-#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
-
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
-#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
-/* in Gen12 ID 0x7FF is reserved to indicate idle */
-#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
-
-enum {
- INTEL_CONTEXT_SCHEDULE_IN = 0,
- INTEL_CONTEXT_SCHEDULE_OUT,
- INTEL_CONTEXT_SCHEDULE_PREEMPTED,
-};
-
-/* Logical Rings */
-void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
-
-int intel_execlists_submission_setup(struct intel_engine_cs *engine);
-
-/* Logical Ring Contexts */
/* At the start of the context image is its per-process HWS page */
#define LRC_PPHWSP_PN (0)
#define LRC_PPHWSP_SZ (1)
@@ -96,32 +26,57 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine);
#define LRC_PPHWSP_SCRATCH 0x34
#define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(u32))
-void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
-
-void intel_lr_context_reset(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 head,
- bool scrub);
-
-void intel_execlists_show_requests(struct intel_engine_cs *engine,
- struct drm_printer *m,
- void (*show_request)(struct drm_printer *m,
- struct i915_request *rq,
- const char *prefix),
- unsigned int max);
-
-struct intel_context *
-intel_execlists_create_virtual(struct intel_engine_cs **siblings,
- unsigned int count);
-
-struct intel_context *
-intel_execlists_clone_virtual(struct intel_engine_cs *src);
-
-int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
- const struct intel_engine_cs *master,
- const struct intel_engine_cs *sibling);
-
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
-
-#endif /* _INTEL_LRC_H_ */
+void lrc_init_wa_ctx(struct intel_engine_cs *engine);
+void lrc_fini_wa_ctx(struct intel_engine_cs *engine);
+
+int lrc_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine);
+void lrc_reset(struct intel_context *ce);
+void lrc_fini(struct intel_context *ce);
+void lrc_destroy(struct kref *kref);
+
+int
+lrc_pre_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr);
+int
+lrc_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *vaddr);
+void lrc_unpin(struct intel_context *ce);
+void lrc_post_unpin(struct intel_context *ce);
+
+void lrc_init_state(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *state);
+
+void lrc_init_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool clear);
+void lrc_reset_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine);
+
+u32 lrc_update_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 head);
+void lrc_update_offsets(struct intel_context *ce,
+ struct intel_engine_cs *engine);
+
+void lrc_check_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const char *when);
+
+void lrc_update_runtime(struct intel_context *ce);
+static inline u32 lrc_get_runtime(const struct intel_context *ce)
+{
+ /*
+ * We can use either ppHWSP[16] which is recorded before the context
+ * switch (and so excludes the cost of context switches) or use the
+ * value from the context image itself, which is saved/restored earlier
+ * and so includes the cost of the save.
+ */
+ return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
+#endif /* __INTEL_LRC_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 1b51f7b9a5c3..65fe76738335 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -9,6 +9,8 @@
#include <linux/types.h>
+#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
+
/* GEN8 to GEN12 Reg State Context */
#define CTX_CONTEXT_CONTROL (0x02 + 1)
#define CTX_RING_HEAD (0x04 + 1)
@@ -52,4 +54,43 @@
#define GEN8_EXECLISTS_STATUS_BUF 0x370
#define GEN11_EXECLISTS_STATUS_BUF2 0x3c0
+/* Execlists regs */
+#define RING_ELSP(base) _MMIO((base) + 0x230)
+#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234)
+#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244)
+#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0)
+#define CTX_CTRL_RS_CTX_ENABLE REG_BIT(1)
+#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT REG_BIT(2)
+#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3)
+#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE REG_BIT(8)
+#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
+#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
+#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
+#define EL_CTRL_LOAD REG_BIT(0)
+
+/*
+ * The docs specify that the write pointer wraps around after 5h, "After status
+ * is written out to the last available status QW at offset 5h, this pointer
+ * wraps to 0."
+ *
+ * Therefore, one must infer that even though there are 3 bits available, 6 and
+ * 7 appear to be reserved.
+ */
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x7
+#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
+#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
+
+#define GEN11_CSB_ENTRIES 12
+#define GEN11_CSB_PTR_MASK 0xf
+#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
+#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
+
+#define MAX_CONTEXT_HW_ID (1 << 21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
+#define GEN11_MAX_CONTEXT_HW_ID (1 << 11) /* exclusive */
+/* in Gen12 ID 0x7FF is reserved to indicate idle */
+#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
+
#endif /* _INTEL_LRC_REG_H_ */
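For illustration only (not part of this series), the CSB pointer masks above decode RING_CONTEXT_STATUS_PTR with the read pointer in bits 15:8 and the write pointer in bits 7:0; the helper name below is hypothetical:

	/* Illustrative only: split the CSB status pointer register. */
	static inline void csb_pointers_example(u32 reg, u8 *read, u8 *write)
	{
		*read  = (reg & GEN8_CSB_READ_PTR_MASK) >> 8;
		*write = reg & GEN8_CSB_WRITE_PTR_MASK;
	}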
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index ab6870242e18..8acb84960cd0 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -24,8 +24,8 @@
#include "intel_engine.h"
#include "intel_gt.h"
+#include "intel_lrc_reg.h"
#include "intel_mocs.h"
-#include "intel_lrc.h"
#include "intel_ring.h"
/* structures required */
@@ -472,7 +472,7 @@ static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
return table->table[I915_MOCS_PTE].l3cc_value;
}
-static inline u32 l3cc_combine(u16 low, u16 high)
+static u32 l3cc_combine(u16 low, u16 high)
{
return low | (u32)high << 16;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 46d9aceda64c..96b85a10ef33 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -80,7 +80,7 @@ void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
kfree(pt);
}
-static inline void
+static void
write_dma_entry(struct drm_i915_gem_object * const pdma,
const unsigned short idx,
const u64 encoded_entry)
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index d7b8e4457fc2..35504c97f11d 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -49,7 +49,7 @@ static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
return rc6_to_gt(rc)->i915;
}
-static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
intel_uncore_write_fw(uncore, reg, val);
}
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 40d8f1a95df6..60393ce5614d 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -95,10 +95,10 @@ region_lmem_init(struct intel_memory_region *mem)
return ret;
}
-const struct intel_memory_region_ops intel_region_lmem_ops = {
+static const struct intel_memory_region_ops intel_region_lmem_ops = {
.init = region_lmem_init,
.release = region_lmem_release,
- .create_object = __i915_gem_lmem_object_create,
+ .init_object = __i915_gem_lmem_object_init,
};
struct intel_memory_region *
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.h b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
index 213def7c7b8a..8ea43e538dab 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.h
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
@@ -8,8 +8,6 @@
struct drm_i915_private;
-extern const struct intel_memory_region_ops intel_region_lmem_ops;
-
struct intel_memory_region *
intel_setup_fake_lmem(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index ea2a77c7b469..ca816ba22197 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -27,7 +27,8 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
-#include "gt/intel_context.h"
+#include "intel_context.h"
+#include "intel_gpu_commands.h"
#include "intel_ring.h"
static const struct intel_renderstate_rodata *
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 3654c955e6be..61410cd62927 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -40,20 +40,19 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
intel_uncore_rmw_fw(uncore, reg, clr, 0);
}
-static void engine_skip_context(struct i915_request *rq)
+static void skip_context(struct i915_request *rq)
{
- struct intel_engine_cs *engine = rq->engine;
struct intel_context *hung_ctx = rq->context;
- if (!i915_request_is_active(rq))
- return;
+ list_for_each_entry_from_rcu(rq, &hung_ctx->timeline->requests, link) {
+ if (!i915_request_is_active(rq))
+ return;
- lockdep_assert_held(&engine->active.lock);
- list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
if (rq->context == hung_ctx) {
i915_request_set_error_once(rq, -EIO);
__i915_request_skip(rq);
}
+ }
}
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
@@ -152,15 +151,14 @@ static void mark_innocent(struct i915_request *rq)
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
-
- GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(__i915_request_is_complete(rq));
rcu_read_lock(); /* protect the GEM context */
if (guilty) {
i915_request_set_error_once(rq, -EIO);
__i915_request_skip(rq);
if (mark_guilty(rq))
- engine_skip_context(rq);
+ skip_context(rq);
} else {
i915_request_set_error_once(rq, -EAGAIN);
mark_innocent(rq);
@@ -231,7 +229,7 @@ static int g4x_do_reset(struct intel_gt *gt,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
+ GT_TRACE(gt, "Wait for media reset failed\n");
goto out;
}
@@ -239,7 +237,7 @@ static int g4x_do_reset(struct intel_gt *gt,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
+ GT_TRACE(gt, "Wait for render reset failed\n");
goto out;
}
@@ -265,7 +263,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
5000, 0,
NULL);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
+ GT_TRACE(gt, "Wait for render reset failed\n");
goto out;
}
@@ -276,7 +274,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
5000, 0,
NULL);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
+ GT_TRACE(gt, "Wait for media reset failed\n");
goto out;
}
@@ -305,9 +303,9 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
500, 0,
NULL);
if (err)
- drm_dbg(&gt->i915->drm,
- "Wait for 0x%08x engines reset failed\n",
- hw_domain_mask);
+ GT_TRACE(gt,
+ "Wait for 0x%08x engines reset failed\n",
+ hw_domain_mask);
return err;
}
@@ -407,8 +405,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
return 0;
if (ret) {
- drm_dbg(&engine->i915->drm,
- "Wait for SFC forced lock ack failed\n");
+ ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
return ret;
}
@@ -499,6 +496,9 @@ static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
u32 request, mask, ack;
int ret;
+ if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
+ return -ETIMEDOUT;
+
ack = intel_uncore_read_fw(uncore, reg);
if (ack & RESET_CTL_CAT_ERROR) {
/*
@@ -754,8 +754,10 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
if (err)
return err;
+ local_bh_disable();
for_each_engine(engine, gt, id)
__intel_engine_reset(engine, stalled_mask & engine->mask);
+ local_bh_enable();
intel_ggtt_restore_fences(gt->ggtt);
@@ -833,9 +835,11 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
set_bit(I915_WEDGED, &gt->reset.flags);
/* Mark all executing requests as skipped */
+ local_bh_disable();
for_each_engine(engine, gt, id)
if (engine->reset.cancel)
engine->reset.cancel(engine);
+ local_bh_enable();
reset_finish(gt, awake);
@@ -1105,25 +1109,12 @@ error:
goto finish;
}
-static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
+static int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
return __intel_gt_reset(engine->gt, engine->mask);
}
-/**
- * intel_engine_reset - reset GPU engine to recover from a hang
- * @engine: engine to reset
- * @msg: reason for GPU reset; or NULL for no drm_notice()
- *
- * Reset a specific GPU engine. Useful if a hang is detected.
- * Returns zero on successful reset or otherwise an error code.
- *
- * Procedure is:
- * - identifies the request that caused the hang and it is dropped
- * - reset engine (which will force the engine to idle)
- * - re-init/configure engine
- */
-int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
+int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
{
struct intel_gt *gt = engine->gt;
bool uses_guc = intel_engine_in_guc_submission_mode(engine);
@@ -1148,8 +1139,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
- drm_dbg(&gt->i915->drm, "%sFailed to reset %s, ret=%d\n",
- uses_guc ? "GuC " : "", engine->name, ret);
+ ENGINE_TRACE(engine, "Failed to reset, err: %d\n", ret);
goto out;
}
@@ -1174,6 +1164,30 @@ out:
return ret;
}
+/**
+ * intel_engine_reset - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ * @msg: reason for GPU reset; or NULL for no drm_notice()
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ * - identifies the request that caused the hang and it is dropped
+ * - reset engine (which will force the engine to idle)
+ * - re-init/configure engine
+ */
+int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
+{
+ int err;
+
+ local_bh_disable();
+ err = __intel_engine_reset_bh(engine, msg);
+ local_bh_enable();
+
+ return err;
+}
+
static void intel_gt_reset_global(struct intel_gt *gt,
u32 engine_mask,
const char *reason)
@@ -1186,7 +1200,7 @@ static void intel_gt_reset_global(struct intel_gt *gt,
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
- drm_dbg(&gt->i915->drm, "resetting chip, engines=%x\n", engine_mask);
+ GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/* Use a watchdog to ensure that our reset completes */
@@ -1260,18 +1274,20 @@ void intel_gt_handle_error(struct intel_gt *gt,
* single reset fails.
*/
if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
+ local_bh_disable();
for_each_engine_masked(engine, gt, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
continue;
- if (intel_engine_reset(engine, msg) == 0)
+ if (__intel_engine_reset_bh(engine, msg) == 0)
engine_mask &= ~engine->mask;
clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags);
}
+ local_bh_enable();
}
if (!engine_mask)
@@ -1380,6 +1396,17 @@ void intel_gt_init_reset(struct intel_gt *gt)
mutex_init(&gt->reset.mutex);
init_srcu_struct(&gt->reset.backoff_srcu);
+ /*
+ * While undesirable to wait inside the shrinker, complain anyway.
+ *
+ * If we have to wait during shrinking, we guarantee forward progress
+ * by forcing the reset. Therefore during the reset we must not
+ * re-enter the shrinker. By declaring that we take the reset mutex
+ * within the shrinker, we forbid ourselves from performing any
+ * fs-reclaim or taking related locks during reset.
+ */
+ i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);
+
/* no GPU until we are ready! */
__set_bit(I915_WEDGED, &gt->reset.flags);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index a0eec7c11c0c..7dbf5cc8a333 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -34,6 +34,8 @@ void intel_gt_reset(struct intel_gt *gt,
const char *reason);
int intel_engine_reset(struct intel_engine_cs *engine,
const char *reason);
+int __intel_engine_reset_bh(struct intel_engine_cs *engine,
+ const char *reason);
void __i915_request_reset(struct i915_request *rq, bool guilty);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 4034a4bac7f0..78d1360caa0f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -5,9 +5,11 @@
*/
#include "gem/i915_gem_object.h"
+
#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_engine.h"
+#include "intel_gpu_commands.h"
#include "intel_ring.h"
#include "intel_timeline.h"
@@ -40,7 +42,7 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
- if (vma->obj->stolen)
+ if (i915_gem_object_is_stolen(vma->obj))
flags |= PIN_MAPPABLE;
else
flags |= PIN_HIGH;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index a41b43f445b8..4984ff565424 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -32,6 +32,7 @@
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
+#include "i915_mitigations.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_gt.h"
@@ -121,31 +122,27 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
hwsp = RING_HWS_PGA(engine->mmio_base);
}
- intel_uncore_write(engine->uncore, hwsp, offset);
- intel_uncore_posting_read(engine->uncore, hwsp);
+ intel_uncore_write_fw(engine->uncore, hwsp, offset);
+ intel_uncore_posting_read_fw(engine->uncore, hwsp);
}
static void flush_cs_tlb(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- if (!IS_GEN_RANGE(dev_priv, 6, 7))
+ if (!IS_GEN_RANGE(engine->i915, 6, 7))
return;
	/* ring should be idle before issuing a sync flush */
- drm_WARN_ON(&dev_priv->drm,
- (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
-
- ENGINE_WRITE(engine, RING_INSTPM,
- _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
- INSTPM_SYNC_FLUSH));
- if (intel_wait_for_register(engine->uncore,
- RING_INSTPM(engine->mmio_base),
- INSTPM_SYNC_FLUSH, 0,
- 1000))
- drm_err(&dev_priv->drm,
- "%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
- engine->name);
+ GEM_DEBUG_WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+
+ ENGINE_WRITE_FW(engine, RING_INSTPM,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (__intel_wait_for_register_fw(engine->uncore,
+ RING_INSTPM(engine->mmio_base),
+ INSTPM_SYNC_FLUSH, 0,
+ 2000, 0, NULL))
+ ENGINE_TRACE(engine,
+ "wait for SyncFlush to complete for TLB invalidation timed out\n");
}
static void ring_setup_status_page(struct intel_engine_cs *engine)
@@ -156,44 +153,6 @@ static void ring_setup_status_page(struct intel_engine_cs *engine)
flush_cs_tlb(engine);
}
-static bool stop_ring(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- if (INTEL_GEN(dev_priv) > 2) {
- ENGINE_WRITE(engine,
- RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
- if (intel_wait_for_register(engine->uncore,
- RING_MI_MODE(engine->mmio_base),
- MODE_IDLE,
- MODE_IDLE,
- 1000)) {
- drm_err(&dev_priv->drm,
- "%s : timed out trying to stop ring\n",
- engine->name);
-
- /*
- * Sometimes we observe that the idle flag is not
- * set even though the ring is empty. So double
- * check before giving up.
- */
- if (ENGINE_READ(engine, RING_HEAD) !=
- ENGINE_READ(engine, RING_TAIL))
- return false;
- }
- }
-
- ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));
-
- ENGINE_WRITE(engine, RING_HEAD, 0);
- ENGINE_WRITE(engine, RING_TAIL, 0);
-
- /* The ring must be empty before it is disabled */
- ENGINE_WRITE(engine, RING_CTL, 0);
-
- return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
-}
-
static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
if (i915_is_ggtt(vm))
@@ -211,9 +170,16 @@ static void set_pp_dir(struct intel_engine_cs *engine)
{
struct i915_address_space *vm = vm_alias(engine->gt->vm);
- if (vm) {
- ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
- ENGINE_WRITE(engine, RING_PP_DIR_BASE, pp_dir(vm));
+ if (!vm)
+ return;
+
+ ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
+ ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));
+
+ if (INTEL_GEN(engine->i915) >= 7) {
+ ENGINE_WRITE_FW(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
@@ -221,38 +187,10 @@ static int xcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_ring *ring = engine->legacy.ring;
- int ret = 0;
ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
ring->head, ring->tail);
- intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
-
- /* WaClearRingBufHeadRegAtInit:ctg,elk */
- if (!stop_ring(engine)) {
- /* G45 ring initialization often fails to reset head to zero */
- drm_dbg(&dev_priv->drm, "%s head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_HEAD),
- ENGINE_READ(engine, RING_TAIL),
- ENGINE_READ(engine, RING_START));
-
- if (!stop_ring(engine)) {
- drm_err(&dev_priv->drm,
- "failed to set %s head to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_HEAD),
- ENGINE_READ(engine, RING_TAIL),
- ENGINE_READ(engine, RING_START));
- ret = -EIO;
- goto out;
- }
- }
-
if (HWS_NEEDS_PHYSICAL(dev_priv))
ring_setup_phys_status_page(engine);
else
@@ -269,7 +207,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
* also enforces ordering), otherwise the hw might lose the new ring
* register values.
*/
- ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
+ ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
@@ -279,53 +217,98 @@ static int xcs_resume(struct intel_engine_cs *engine)
set_pp_dir(engine);
/* First wake the ring up to an empty/idle ring */
- ENGINE_WRITE(engine, RING_HEAD, ring->head);
- ENGINE_WRITE(engine, RING_TAIL, ring->head);
+ ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
+ ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
ENGINE_POSTING_READ(engine, RING_TAIL);
- ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
+ ENGINE_WRITE_FW(engine, RING_CTL,
+ RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
- if (intel_wait_for_register(engine->uncore,
- RING_CTL(engine->mmio_base),
- RING_VALID, RING_VALID,
- 50)) {
- drm_err(&dev_priv->drm, "%s initialization failed "
- "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_CTL) & RING_VALID,
- ENGINE_READ(engine, RING_HEAD), ring->head,
- ENGINE_READ(engine, RING_TAIL), ring->tail,
- ENGINE_READ(engine, RING_START),
- i915_ggtt_offset(ring->vma));
- ret = -EIO;
- goto out;
+ if (__intel_wait_for_register_fw(engine->uncore,
+ RING_CTL(engine->mmio_base),
+ RING_VALID, RING_VALID,
+ 5000, 0, NULL)) {
+ drm_err(&dev_priv->drm,
+ "%s initialization failed; "
+ "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
+ engine->name,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & RING_VALID,
+ ENGINE_READ(engine, RING_HEAD), ring->head,
+ ENGINE_READ(engine, RING_TAIL), ring->tail,
+ ENGINE_READ(engine, RING_START),
+ i915_ggtt_offset(ring->vma));
+ return -EIO;
}
if (INTEL_GEN(dev_priv) > 2)
- ENGINE_WRITE(engine,
- RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine,
+ RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
/* Now awake, let it get started */
if (ring->tail != ring->head) {
- ENGINE_WRITE(engine, RING_TAIL, ring->tail);
+ ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
ENGINE_POSTING_READ(engine, RING_TAIL);
}
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_signal_breadcrumbs(engine);
-out:
- intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
+ return 0;
+}
- return ret;
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
}
-static void reset_prepare(struct intel_engine_cs *engine)
+static void xcs_sanitize(struct intel_engine_cs *engine)
+{
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
+ *
+ * We have to assume that across suspend/resume (or other loss
+	 * of control) the contents of our pinned buffers have been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
+ */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ sanitize_hwsp(engine);
+
+ /* And scrub the dirty cachelines for the HWSP */
+ clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
+static bool stop_ring(struct intel_engine_cs *engine)
{
- struct intel_uncore *uncore = engine->uncore;
- const u32 base = engine->mmio_base;
+ /* Empty the ring by skipping to the end */
+ ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
+ ENGINE_POSTING_READ(engine, RING_HEAD);
+
+ /* The ring must be empty before it is disabled */
+ ENGINE_WRITE_FW(engine, RING_CTL, 0);
+ ENGINE_POSTING_READ(engine, RING_CTL);
+
+ /* Then reset the disabled ring */
+ ENGINE_WRITE_FW(engine, RING_HEAD, 0);
+ ENGINE_WRITE_FW(engine, RING_TAIL, 0);
+
+ return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
+}
+static void reset_prepare(struct intel_engine_cs *engine)
+{
/*
* We stop engines, otherwise we might get failed reset and a
	 * dead gpu (on elk). Even a gpu as modern as kbl can suffer
@@ -337,30 +320,35 @@ static void reset_prepare(struct intel_engine_cs *engine)
* WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
*
* WaMediaResetMainRingCleanup:ctg,elk (presumably)
+ * WaClearRingBufHeadRegAtInit:ctg,elk
*
* FIXME: Wa for more modern gens needs to be validated
*/
ENGINE_TRACE(engine, "\n");
+ intel_engine_stop_cs(engine);
- if (intel_engine_stop_cs(engine))
- ENGINE_TRACE(engine, "timed out on STOP_RING\n");
-
- intel_uncore_write_fw(uncore,
- RING_HEAD(base),
- intel_uncore_read_fw(uncore, RING_TAIL(base)));
- intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
-
- intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
- intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
- intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
-
- /* The ring must be empty before it is disabled */
- intel_uncore_write_fw(uncore, RING_CTL(base), 0);
+ if (!stop_ring(engine)) {
+ /* G45 ring initialization often fails to reset head to zero */
+ drm_dbg(&engine->i915->drm,
+ "%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ ENGINE_READ_FW(engine, RING_CTL),
+ ENGINE_READ_FW(engine, RING_HEAD),
+ ENGINE_READ_FW(engine, RING_TAIL),
+ ENGINE_READ_FW(engine, RING_START));
+ }
- /* Check acts as a post */
- if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
- ENGINE_TRACE(engine, "ring head [%x] not parked\n",
- intel_uncore_read_fw(uncore, RING_HEAD(base)));
+ if (!stop_ring(engine)) {
+ drm_err(&engine->i915->drm,
+ "failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ ENGINE_READ_FW(engine, RING_CTL),
+ ENGINE_READ_FW(engine, RING_HEAD),
+ ENGINE_READ_FW(engine, RING_TAIL),
+ ENGINE_READ_FW(engine, RING_START));
+ }
}
static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
@@ -371,12 +359,14 @@ static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
rq = NULL;
spin_lock_irqsave(&engine->active.lock, flags);
+ rcu_read_lock();
list_for_each_entry(pos, &engine->active.requests, sched.link) {
- if (!i915_request_completed(pos)) {
+ if (!__i915_request_is_complete(pos)) {
rq = pos;
break;
}
}
+ rcu_read_unlock();
/*
* The guilty request will get skipped on a hung engine.
@@ -440,10 +430,8 @@ static void reset_cancel(struct intel_engine_cs *engine)
spin_lock_irqsave(&engine->active.lock, flags);
/* Mark all submitted requests as skipped. */
- list_for_each_entry(request, &engine->active.requests, sched.link) {
- i915_request_set_error_once(request, -EIO);
- i915_request_mark_complete(request);
- }
+ list_for_each_entry(request, &engine->active.requests, sched.link)
+ i915_request_mark_eio(request);
intel_engine_signal_breadcrumbs(engine);
/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -602,6 +590,7 @@ static int ring_context_pin(struct intel_context *ce, void *unused)
static void ring_context_reset(struct intel_context *ce)
{
intel_ring_reset(ce->ring, ce->ring->emit);
+ clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}
static const struct intel_context_ops ring_context_ops = {
@@ -653,9 +642,9 @@ static int load_pd_dir(struct i915_request *rq,
return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
-static inline int mi_set_context(struct i915_request *rq,
- struct intel_context *ce,
- u32 flags)
+static int mi_set_context(struct i915_request *rq,
+ struct intel_context *ce,
+ u32 flags)
{
struct intel_engine_cs *engine = rq->engine;
struct drm_i915_private *i915 = engine->i915;
@@ -886,7 +875,8 @@ static int switch_context(struct i915_request *rq)
GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
if (engine->wa_ctx.vma && ce != engine->kernel_context) {
- if (engine->wa_ctx.vma->private != ce) {
+ if (engine->wa_ctx.vma->private != ce &&
+ i915_mitigate_clear_residuals()) {
ret = clear_residuals(rq);
if (ret)
return ret;
@@ -1069,6 +1059,8 @@ static void setup_common(struct intel_engine_cs *engine)
setup_irq(engine);
engine->resume = xcs_resume;
+ engine->sanitize = xcs_sanitize;
+
engine->reset.prepare = reset_prepare;
engine->reset.rewind = reset_rewind;
engine->reset.cancel = reset_cancel;
@@ -1290,7 +1282,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
- if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
+ if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
err = gen7_ctx_switch_bb_init(engine);
if (err)
goto err_ring_unpin;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index b629eeb14002..ee5835c29c03 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -43,7 +43,7 @@ static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
return mask & ~rps->pm_intrmsk_mbz;
}
-static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
intel_uncore_write_fw(uncore, reg, val);
}
@@ -400,7 +400,7 @@ static unsigned int gen5_invert_freq(struct intel_rps *rps,
return val;
}
-static bool gen5_rps_set(struct intel_rps *rps, u8 val)
+static int __gen5_rps_set(struct intel_rps *rps, u8 val)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
u16 rgvswctl;
@@ -410,7 +410,7 @@ static bool gen5_rps_set(struct intel_rps *rps, u8 val)
rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
DRM_DEBUG("gpu busy, RCS change rejected\n");
- return false; /* still busy with another command */
+ return -EBUSY; /* still busy with another command */
}
/* Invert the frequency bin into an ips delay */
@@ -426,7 +426,18 @@ static bool gen5_rps_set(struct intel_rps *rps, u8 val)
rgvswctl |= MEMCTL_CMD_STS;
intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
- return true;
+ return 0;
+}
+
+static int gen5_rps_set(struct intel_rps *rps, u8 val)
+{
+ int err;
+
+ spin_lock_irq(&mchdev_lock);
+ err = __gen5_rps_set(rps, val);
+ spin_unlock_irq(&mchdev_lock);
+
+ return err;
}
static unsigned long intel_pxfreq(u32 vidfreq)
@@ -557,7 +568,7 @@ static bool gen5_rps_enable(struct intel_rps *rps)
"stuck trying to change perf mode\n");
mdelay(1);
- gen5_rps_set(rps, rps->cur_freq);
+ __gen5_rps_set(rps, rps->cur_freq);
rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
@@ -599,7 +610,7 @@ static void gen5_rps_disable(struct intel_rps *rps)
intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
/* Go back to the starting frequency */
- gen5_rps_set(rps, rps->idle_freq);
+ __gen5_rps_set(rps, rps->idle_freq);
mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
@@ -797,20 +808,19 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update)
struct drm_i915_private *i915 = rps_to_i915(rps);
int err;
- if (INTEL_GEN(i915) < 6)
- return 0;
-
if (val == rps->last_freq)
return 0;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
err = vlv_rps_set(rps, val);
- else
+ else if (INTEL_GEN(i915) >= 6)
err = gen6_rps_set(rps, val);
+ else
+ err = gen5_rps_set(rps, val);
if (err)
return err;
- if (update)
+ if (update && INTEL_GEN(i915) >= 6)
gen6_rps_set_thresholds(rps, val);
rps->last_freq = val;
@@ -852,6 +862,8 @@ void intel_rps_park(struct intel_rps *rps)
{
int adj;
+ GEM_BUG_ON(atomic_read(&rps->num_waiters));
+
if (!intel_rps_clear_active(rps))
return;
@@ -907,28 +919,27 @@ void intel_rps_park(struct intel_rps *rps)
void intel_rps_boost(struct i915_request *rq)
{
- struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
- unsigned long flags;
-
- if (i915_request_signaled(rq) || !intel_rps_is_active(rps))
+ if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
return;
/* Serializes with i915_request_retire() */
- spin_lock_irqsave(&rq->lock, flags);
- if (!i915_request_has_waitboost(rq) &&
- !dma_fence_is_signaled_locked(&rq->fence)) {
- set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
+ if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
+ struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
+
+ if (atomic_fetch_inc(&rps->num_waiters))
+ return;
+
+ if (!intel_rps_is_active(rps))
+ return;
GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
rq->fence.context, rq->fence.seqno);
- if (!atomic_fetch_inc(&rps->num_waiters) &&
- READ_ONCE(rps->cur_freq) < rps->boost_freq)
+ if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
schedule_work(&rps->work);
- atomic_inc(&rps->boosts);
+ WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
}
- spin_unlock_irqrestore(&rq->lock, flags);
}
int intel_rps_set(struct intel_rps *rps, u8 val)
@@ -1798,7 +1809,7 @@ void gen5_rps_irq_handler(struct intel_rps *rps)
rps->min_freq_softlimit,
rps->max_freq_softlimit);
- if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq))
+ if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq))
rps->cur_freq = new_freq;
spin_unlock(&mchdev_lock);
@@ -2109,7 +2120,7 @@ bool i915_gpu_turbo_disable(void)
spin_lock_irq(&mchdev_lock);
rps->max_freq_softlimit = rps->min_freq;
- ret = gen5_rps_set(&i915->gt.rps, rps->min_freq);
+ ret = !__gen5_rps_set(&i915->gt.rps, rps->min_freq);
spin_unlock_irq(&mchdev_lock);
drm_dev_put(&i915->drm);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 38083f0402d9..029fe13cf303 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -93,7 +93,7 @@ struct intel_rps {
} power;
atomic_t num_waiters;
- atomic_t boosts;
+ unsigned int boosts;
/* manual wa residency calculations */
struct intel_rps_ei ei;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 7ea94d201fe6..037b0e3ccbed 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -126,6 +126,10 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
struct intel_timeline_cacheline *cl =
container_of(rcu, typeof(*cl), rcu);
+ /* Must wait until after all *rq->hwsp are complete before removing */
+ i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+ __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+
i915_active_fini(&cl->active);
kfree(cl);
}
@@ -133,11 +137,6 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
GEM_BUG_ON(!i915_active_is_idle(&cl->active));
-
- i915_gem_object_unpin_map(cl->hwsp->vma->obj);
- i915_vma_put(cl->hwsp->vma);
- __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
-
call_rcu(&cl->rcu, __rcu_cacheline_free);
}
@@ -179,7 +178,6 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
return ERR_CAST(vaddr);
}
- i915_vma_get(hwsp->vma);
cl->hwsp = hwsp;
cl->vaddr = page_pack_bits(vaddr, cacheline);
@@ -321,6 +319,25 @@ __intel_timeline_create(struct intel_gt *gt,
return timeline;
}
+struct intel_timeline *
+intel_timeline_create_from_engine(struct intel_engine_cs *engine,
+ unsigned int offset)
+{
+ struct i915_vma *hwsp = engine->status_page.vma;
+ struct intel_timeline *tl;
+
+ tl = __intel_timeline_create(engine->gt, hwsp, offset);
+ if (IS_ERR(tl))
+ return tl;
+
+ /* Borrow a nearby lock; we only create these timelines during init */
+ mutex_lock(&hwsp->vm->mutex);
+ list_add_tail(&tl->engine_link, &engine->status_page.timelines);
+ mutex_unlock(&hwsp->vm->mutex);
+
+ return tl;
+}
+
void __intel_timeline_pin(struct intel_timeline *tl)
{
GEM_BUG_ON(!atomic_read(&tl->pin_count));
@@ -565,11 +582,11 @@ int intel_timeline_read_hwsp(struct i915_request *from,
rcu_read_lock();
cl = rcu_dereference(from->hwsp_cacheline);
- if (i915_request_completed(from)) /* confirm cacheline is valid */
+ if (i915_request_signaled(from)) /* confirm cacheline is valid */
goto unlock;
if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
goto unlock; /* seqno wrapped and completed! */
- if (unlikely(i915_request_completed(from)))
+ if (unlikely(__i915_request_is_complete(from)))
goto release;
rcu_read_unlock();
@@ -617,6 +634,86 @@ void intel_gt_fini_timelines(struct intel_gt *gt)
GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
}
+void intel_gt_show_timelines(struct intel_gt *gt,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent))
+{
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *tl, *tn;
+ LIST_HEAD(free);
+
+ spin_lock(&timelines->lock);
+ list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+ unsigned long count, ready, inflight;
+ struct i915_request *rq, *rn;
+ struct dma_fence *fence;
+
+ if (!mutex_trylock(&tl->mutex)) {
+ drm_printf(m, "Timeline %llx: busy; skipping\n",
+ tl->fence_context);
+ continue;
+ }
+
+ intel_timeline_get(tl);
+ GEM_BUG_ON(!atomic_read(&tl->active_count));
+ atomic_inc(&tl->active_count); /* pin the list element */
+ spin_unlock(&timelines->lock);
+
+ count = 0;
+ ready = 0;
+ inflight = 0;
+ list_for_each_entry_safe(rq, rn, &tl->requests, link) {
+ if (i915_request_completed(rq))
+ continue;
+
+ count++;
+ if (i915_request_is_ready(rq))
+ ready++;
+ if (i915_request_is_active(rq))
+ inflight++;
+ }
+
+ drm_printf(m, "Timeline %llx: { ", tl->fence_context);
+ drm_printf(m, "count: %lu, ready: %lu, inflight: %lu",
+ count, ready, inflight);
+ drm_printf(m, ", seqno: { current: %d, last: %d }",
+ *tl->hwsp_seqno, tl->seqno);
+ fence = i915_active_fence_get(&tl->last_request);
+ if (fence) {
+ drm_printf(m, ", engine: %s",
+ to_request(fence)->engine->name);
+ dma_fence_put(fence);
+ }
+ drm_printf(m, " }\n");
+
+ if (show_request) {
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ show_request(m, rq, "", 2);
+ }
+
+ mutex_unlock(&tl->mutex);
+ spin_lock(&timelines->lock);
+
+ /* Resume list iteration after reacquiring spinlock */
+ list_safe_reset_next(tl, tn, link);
+ if (atomic_dec_and_test(&tl->active_count))
+ list_del(&tl->link);
+
+ /* Defer the final release to after the spinlock */
+ if (refcount_dec_and_test(&tl->kref.refcount)) {
+ GEM_BUG_ON(atomic_read(&tl->active_count));
+ list_add(&tl->link, &free);
+ }
+ }
+ spin_unlock(&timelines->lock);
+
+ list_for_each_entry_safe(tl, tn, &free, link)
+ __intel_timeline_free(&tl->kref);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "gt/selftests/mock_timeline.c"
#include "gt/selftest_timeline.c"
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index 9882cd911d8e..dcdee692a80e 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -31,6 +31,8 @@
#include "i915_syncmap.h"
#include "intel_timeline_types.h"
+struct drm_printer;
+
struct intel_timeline *
__intel_timeline_create(struct intel_gt *gt,
struct i915_vma *global_hwsp,
@@ -42,14 +44,9 @@ intel_timeline_create(struct intel_gt *gt)
return __intel_timeline_create(gt, NULL, 0);
}
-static inline struct intel_timeline *
+struct intel_timeline *
intel_timeline_create_from_engine(struct intel_engine_cs *engine,
- unsigned int offset)
-{
- return __intel_timeline_create(engine->gt,
- engine->status_page.vma,
- offset);
-}
+ unsigned int offset);
static inline struct intel_timeline *
intel_timeline_get(struct intel_timeline *timeline)
@@ -106,4 +103,18 @@ int intel_timeline_read_hwsp(struct i915_request *from,
void intel_gt_init_timelines(struct intel_gt *gt);
void intel_gt_fini_timelines(struct intel_gt *gt);
+void intel_gt_show_timelines(struct intel_gt *gt,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent));
+
+static inline bool
+intel_timeline_is_last(const struct intel_timeline *tl,
+ const struct i915_request *rq)
+{
+ return list_is_last_rcu(&rq->link, &tl->requests);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 4474f487f589..e360f50706bf 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -84,6 +84,8 @@ struct intel_timeline {
struct list_head link;
struct intel_gt *gt;
+ struct list_head engine_link;
+
struct kref kref;
struct rcu_head rcu;
};
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index adc9a8ea410a..3fdcd5ff71dd 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
@@ -194,7 +195,7 @@ static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
}
static void
-wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
+wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
wa_add(wal, reg, clear, set, clear);
}
@@ -202,21 +203,32 @@ wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
- wa_write_masked_or(wal, reg, ~0, set);
+ wa_write_clr_set(wal, reg, ~0, set);
}
static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
- wa_write_masked_or(wal, reg, set, set);
+ wa_write_clr_set(wal, reg, set, set);
}
static void
wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
{
- wa_write_masked_or(wal, reg, clr, 0);
+ wa_write_clr_set(wal, reg, clr, 0);
}
+/*
+ * WA operations on "masked register". A masked register has the upper 16 bits
+ * documented as "masked" in b-spec. Its purpose is to allow writing to just a
+ * portion of the register without a rmw: you simply write in the upper 16 bits
+ * the mask of bits you are going to modify.
+ *
+ * The wa_masked_* family of functions already does the necessary operations to
+ * calculate the mask based on the parameters passed, so the user only has to
+ * provide the lower 16 bits of that register.
+ */
+
static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
@@ -229,38 +241,36 @@ wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val);
}
-#define WA_SET_BIT_MASKED(addr, mask) \
- wa_masked_en(wal, (addr), (mask))
-
-#define WA_CLR_BIT_MASKED(addr, mask) \
- wa_masked_dis(wal, (addr), (mask))
-
-#define WA_SET_FIELD_MASKED(addr, mask, value) \
- wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value)))
+static void
+wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
+ u32 mask, u32 val)
+{
+ wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask);
+}
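For illustration only (not part of this series), the masked-write encoding the comment above describes boils down to the following, assuming the stock i915_reg.h macros:

	/*
	 * _MASKED_FIELD(mask, value)  ~  ((mask) << 16 | (value))
	 * _MASKED_BIT_ENABLE(bit)     ~  _MASKED_FIELD(bit, bit)
	 * _MASKED_BIT_DISABLE(bit)    ~  _MASKED_FIELD(bit, 0)
	 *
	 * e.g. wa_masked_en(wal, reg, BIT(3)) queues a write of 0x00080008:
	 * the high half selects which bits the hardware updates, the low
	 * half supplies their new values.
	 */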
static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+ wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}
static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+ wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+ wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
- WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+ wa_masked_en(wal, MI_MODE, ASYNC_FLIP_PERF_DISABLE);
/* WaDisablePartialInstShootdown:bdw,chv */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
@@ -268,9 +278,9 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
*/
/* WaForceEnableNonCoherent:bdw,chv */
/* WaHdcDisableFetchWhenMasked:bdw,chv */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_DONOT_FETCH_MEM_WHEN_MASKED |
- HDC_FORCE_NON_COHERENT);
+ wa_masked_en(wal, HDC_CHICKEN0,
+ HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+ HDC_FORCE_NON_COHERENT);
/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
* "The Hierarchical Z RAW Stall Optimization allows non-overlapping
@@ -280,10 +290,10 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
*
* This optimization is off by default for BDW and CHV; turn it on.
*/
- WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
/* Wa4x4STCOptimizationDisable:bdw,chv */
- WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+ wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
/*
* BSpec recommends 8x4 when MSAA is used,
@@ -293,7 +303,7 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
- WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ wa_masked_field_set(wal, GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
}
@@ -306,24 +316,24 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
gen8_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* WaDisableDopClockGating:bdw
*
* Also see the related UCGTCL1 write in bdw_init_clock_gating()
* to disable EUTC clock gating.
*/
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
- DOP_CLOCK_GATING_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ DOP_CLOCK_GATING_DISABLE);
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN8_SAMPLER_POWER_BYPASS_DIS);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- /* WaForceContextSaveRestoreNonCoherent:bdw */
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
- /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+ wa_masked_en(wal, HDC_CHICKEN0,
+ /* WaForceContextSaveRestoreNonCoherent:bdw */
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
+ (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -332,10 +342,10 @@ static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
gen8_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:chv */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* Improve HiZ throughput on CHV. */
- WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+ wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}
static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -349,38 +359,38 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
* Must match Display Engine. See
* WaCompressedResourceDisplayNewHashMode.
*/
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN9_PBE_COMPRESSED_HASH_SELECTION);
- WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN9_PBE_COMPRESSED_HASH_SELECTION);
+ wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
}
/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- FLOW_CONTROL_ENABLE |
- PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ FLOW_CONTROL_ENABLE |
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_ENABLE_YV12_BUGFIX |
- GEN9_ENABLE_GPGPU_PREEMPTION);
+ wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_ENABLE_YV12_BUGFIX |
+ GEN9_ENABLE_GPGPU_PREEMPTION);
/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(CACHE_MODE_1,
- GEN8_4x4_STC_OPTIMIZATION_DISABLE |
- GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
+ wa_masked_en(wal, CACHE_MODE_1,
+ GEN8_4x4_STC_OPTIMIZATION_DISABLE |
+ GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
- WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
- GEN9_CCS_TLB_PREFETCH_ENABLE);
+ wa_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_CCS_TLB_PREFETCH_ENABLE);
/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
- HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+ wa_masked_en(wal, HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
* both tied to WaForceContextSaveRestoreNonCoherent
@@ -396,19 +406,19 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
*/
/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_NON_COHERENT);
+ wa_masked_en(wal, HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
if (IS_SKYLAKE(i915) ||
IS_KABYLAKE(i915) ||
IS_COFFEELAKE(i915) ||
IS_COMETLAKE(i915))
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN8_SAMPLER_POWER_BYPASS_DIS);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
/*
* Supporting preemption with fine-granularity requires changes in the
@@ -422,16 +432,16 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
*/
/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
- WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+ wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
/* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
if (IS_GEN9_LP(i915))
- WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
+ wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}
static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
@@ -465,7 +475,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
return;
/* Tune IZ hashing. See intel_device_info_runtime_init() */
- WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ wa_masked_field_set(wal, GEN7_GT_MODE,
GEN9_IZ_HASHING_MASK(2) |
GEN9_IZ_HASHING_MASK(1) |
GEN9_IZ_HASHING_MASK(0),
@@ -487,12 +497,12 @@ static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:bxt */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- STALL_DOP_GATING_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ STALL_DOP_GATING_DISABLE);
/* WaToEnableHwFixForPushConstHWBug:bxt */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -504,12 +514,12 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
/* WaToEnableHwFixForPushConstHWBug:kbl */
if (IS_KBL_GT_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableSbeCacheDispatchPortSharing:kbl */
- WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -518,8 +528,8 @@ static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:glk */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -528,41 +538,41 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:cfl */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableSbeCacheDispatchPortSharing:cfl */
- WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
/* WaForceContextSaveRestoreNonCoherent:cnl */
- WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
+ wa_masked_en(wal, CNL_HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaPushConstantDereferenceHoldDisable:cnl */
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
/* FtrEnableFastAnisoL1BankingFix:cnl */
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
/* WaDisable3DMidCmdPreemption:cnl */
- WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+ wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
/* WaDisableGPGPUMidCmdPreemption:cnl */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
/* WaDisableEarlyEOT:cnl */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
}
static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -580,8 +590,8 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
* Formerly known as WaPushConstantDereferenceHoldDisable
*/
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
- PUSH_CONSTANT_DEREF_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ PUSH_CONSTANT_DEREF_DISABLE);
/* WaForceEnableNonCoherent:icl
* This is not the same workaround as in early Gen9 platforms, where
@@ -590,40 +600,40 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
* (the register is whitelisted in hardware now, so UMDs can opt in
* for coherency if they have a good reason).
*/
- WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
+ wa_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
/* Wa_2006611047:icl (pre-prod)
* Formerly known as WaDisableImprovedTdlClkGating
*/
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
- GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
/* Wa_2006665173:icl (pre-prod) */
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
- WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
- GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
+ wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
/* WaEnableFloatBlendOptimization:icl */
- wa_write_masked_or(wal,
- GEN10_CACHE_MODE_SS,
- 0, /* write-only, so skip validation */
- _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
+ wa_write_clr_set(wal,
+ GEN10_CACHE_MODE_SS,
+ 0, /* write-only, so skip validation */
+ _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
/* WaDisableGPGPUMidThreadPreemption:icl */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
/* allow headerless messages for preemptible GPGPU context */
- WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
- GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
+ wa_masked_en(wal, GEN10_SAMPLER_MODE,
+ GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
/* Wa_1604278689:icl,ehl */
wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
- wa_write_masked_or(wal, IVB_FBC_RT_BASE_UPPER,
- 0, /* write-only register; skip validation */
- 0xFFFFFFFF);
+ wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
+ 0, /* write-only register; skip validation */
+ 0xFFFFFFFF);
/* Wa_1406306137:icl,ehl */
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
@@ -643,11 +653,11 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
* Wa_14010443199:rkl
* Wa_14010698770:rkl
*/
- WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
- GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+ wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
/* WaDisableGPGPUMidThreadPreemption:gen12 */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
}
@@ -680,12 +690,22 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
gen12_ctx_workarounds_init(engine, wal);
/* Wa_1409044764 */
- WA_CLR_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
- DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
+ wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
/* Wa_22010493298 */
- WA_SET_BIT_MASKED(HIZ_CHICKEN,
- DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
+ wa_masked_en(wal, HIZ_CHICKEN,
+ DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
+
+ /*
+ * Wa_16011163337
+ *
+ * Like in tgl_ctx_workarounds_init(), read verification is ignored due
+ * to Wa_1608008084.
+ */
+ wa_add(wal,
+ FF_MODE2,
+ FF_MODE2_GS_TIMER_MASK, FF_MODE2_GS_TIMER_224, 0);
}
static void
@@ -804,57 +824,11 @@ ilk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
static void
snb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
- wa_masked_en(wal,
- _3D_CHICKEN,
- _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
-
- /* WaDisable_RenderCache_OperationalFlush:snb */
- wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal,
- GEN6_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
-
- wa_masked_dis(wal, CACHE_MODE_0, CM0_STC_EVICT_DISABLE_LRA_SNB);
-
- wa_masked_en(wal,
- _3D_CHICKEN3,
- /* WaStripsFansDisableFastClipPerformanceFix:snb */
- _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
- /*
- * Bspec says:
- * "This bit must be set if 3DSTATE_CLIP clip mode is set
- * to normal and 3DSTATE_SF number of SF output attributes
- * is more than 16."
- */
- _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
}
static void
ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- /* WaDisableEarlyCull:ivb */
- wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
-
- /* WaDisablePSDDualDispatchEnable:ivb */
- if (IS_IVB_GT1(i915))
- wa_masked_en(wal,
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
-
- /* WaDisable_RenderCache_OperationalFlush:ivb */
- wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
-
/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
wa_masked_dis(wal,
GEN7_COMMON_SLICE_CHICKEN1,
@@ -866,91 +840,15 @@ ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
/* WaForceL3Serialization:ivb */
wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
-
- /*
- * WaVSThreadDispatchOverride:ivb,vlv
- *
- * This actually overrides the dispatch
- * mode for all thread types.
- */
- wa_write_masked_or(wal, GEN7_FF_THREAD_MODE,
- GEN7_FF_SCHED_MASK,
- GEN7_FF_TS_SCHED_HW |
- GEN7_FF_VS_SCHED_HW |
- GEN7_FF_DS_SCHED_HW);
-
- if (0) { /* causes HiZ corruption on ivb:gt1 */
- /* enable HiZ Raw Stall Optimization */
- wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
- }
-
- /* WaDisable4x2SubspanOptimization:ivb */
- wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal, GEN7_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
}
static void
vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- /* WaDisableEarlyCull:vlv */
- wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
-
- /* WaPsdDispatchEnable:vlv */
- /* WaDisablePSDDualDispatchEnable:vlv */
- wa_masked_en(wal,
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_MAX_PS_THREAD_DEP |
- GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
-
- /* WaDisable_RenderCache_OperationalFlush:vlv */
- wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
-
/* WaForceL3Serialization:vlv */
wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
/*
- * WaVSThreadDispatchOverride:ivb,vlv
- *
- * This actually overrides the dispatch
- * mode for all thread types.
- */
- wa_write_masked_or(wal,
- GEN7_FF_THREAD_MODE,
- GEN7_FF_SCHED_MASK,
- GEN7_FF_TS_SCHED_HW |
- GEN7_FF_VS_SCHED_HW |
- GEN7_FF_DS_SCHED_HW);
-
- /*
- * BSpec says this must be set, even though
- * WaDisable4x2SubspanOptimization isn't listed for VLV.
- */
- wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal, GEN7_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
-
- /*
* WaIncreaseL3CreditsForVLVB0:vlv
* This is the hardware default actually.
*/
@@ -970,31 +868,6 @@ hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
/* WaVSRefCountFullforceMissDisable:hsw */
wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
-
- wa_masked_dis(wal,
- CACHE_MODE_0_GEN7,
- /* WaDisable_RenderCache_OperationalFlush:hsw */
- RC_OP_FLUSH_ENABLE |
- /* enable HiZ Raw Stall Optimization */
- HIZ_RAW_STALL_OPT_DISABLE);
-
- /* WaDisable4x2SubspanOptimization:hsw */
- wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal, GEN7_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
-
- /* WaSampleCChickenBitEnable:hsw */
- wa_masked_en(wal, HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
}
static void
@@ -1164,7 +1037,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
drm_dbg(&i915->drm, "MCR slice/subslice = %x\n", mcr);
- wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
+ wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
static void
@@ -1189,10 +1062,10 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaModifyGamTlbPartitioning:icl */
- wa_write_masked_or(wal,
- GEN11_GACB_PERF_CTRL,
- GEN11_HASH_CTRL_MASK,
- GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
+ wa_write_clr_set(wal,
+ GEN11_GACB_PERF_CTRL,
+ GEN11_HASH_CTRL_MASK,
+ GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
/* Wa_1405766107:icl
* Formerly known as WaCL2SFHalfMaxAlloc
@@ -1260,6 +1133,11 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+
+ /* Wa_1408615072:tgl[a0] */
+ if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ VSUNIT_CLKGATE_DIS_TGL);
}
static void
@@ -1358,9 +1236,9 @@ static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
if ((cur ^ wa->set) & wa->read) {
- DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x)\n",
+ DRM_ERROR("%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
name, from, i915_mmio_reg_offset(wa->reg),
- cur, cur & wa->read, wa->set);
+ cur, cur & wa->read, wa->set & wa->read);
return false;
}
@@ -1425,7 +1303,8 @@ bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
}
-static inline bool is_nonpriv_flags_valid(u32 flags)
+__maybe_unused
+static bool is_nonpriv_flags_valid(u32 flags)
{
/* Check only valid flag bits are set */
if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
@@ -1752,10 +1631,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal,
GEN7_SARCHKMD,
GEN7_DISABLE_SAMPLER_PREFETCH);
-
- /* Wa_1408615072:tgl */
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
- VSUNIT_CLKGATE_DIS_TGL);
}
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
@@ -1770,6 +1645,19 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
*/
wa_write_or(wal, GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+
+ /*
+ * Wa_1606700617:tgl,dg1
+ * Wa_22010271021:tgl,rkl,dg1
+ */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
+
+ /* Wa_1406941453:tgl,rkl,dg1 */
+ wa_masked_en(wal,
+ GEN10_SAMPLER_MODE,
+ ENABLE_SMALLPL);
}
if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
@@ -1798,21 +1686,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN6_RC_SLEEP_PSMI_CONTROL,
GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
-
- /*
- * Wa_1606700617:tgl
- * Wa_22010271021:tgl,rkl
- */
- wa_masked_en(wal,
- GEN9_CS_DEBUG_MODE1,
- FF_DOP_CLOCK_GATE_DISABLE);
- }
-
- if (IS_GEN(i915, 12)) {
- /* Wa_1406941453:gen12 */
- wa_masked_en(wal,
- GEN10_SAMPLER_MODE,
- ENABLE_SMALLPL);
}
if (IS_GEN(i915, 11)) {
@@ -1838,14 +1711,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* Wa_1604223664:icl
* Formerly known as WaL3BankAddressHashing
*/
- wa_write_masked_or(wal,
- GEN8_GARBCNTL,
- GEN11_HASH_CTRL_EXCL_MASK,
- GEN11_HASH_CTRL_EXCL_BIT0);
- wa_write_masked_or(wal,
- GEN11_GLBLINVL,
- GEN11_BANK_HASH_ADDR_EXCL_MASK,
- GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+ wa_write_clr_set(wal,
+ GEN8_GARBCNTL,
+ GEN11_HASH_CTRL_EXCL_MASK,
+ GEN11_HASH_CTRL_EXCL_BIT0);
+ wa_write_clr_set(wal,
+ GEN11_GLBLINVL,
+ GEN11_BANK_HASH_ADDR_EXCL_MASK,
+ GEN11_BANK_HASH_ADDR_EXCL_BIT0);
/*
* Wa_1405733216:icl
@@ -1874,10 +1747,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_DISABLE_SAMPLER_PREFETCH);
/* Wa_1409178092:icl */
- wa_write_masked_or(wal,
- GEN11_SCRATCH2,
- GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
- 0);
+ wa_write_clr_set(wal,
+ GEN11_SCRATCH2,
+ GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
+ 0);
/* WaEnable32PlaneMode:icl */
wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
@@ -1951,11 +1824,11 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
if (IS_GEN9_LP(i915))
- wa_write_masked_or(wal,
- GEN8_L3SQCREG1,
- L3_PRIO_CREDITS_MASK,
- L3_GENERAL_PRIO_CREDITS(62) |
- L3_HIGH_PRIO_CREDITS(2));
+ wa_write_clr_set(wal,
+ GEN8_L3SQCREG1,
+ L3_PRIO_CREDITS_MASK,
+ L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
wa_write_or(wal,
@@ -1963,12 +1836,112 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN8_LQSC_FLUSH_COHERENT_LINES);
}
- if (IS_GEN(i915, 7))
+ if (IS_HASWELL(i915)) {
+ /* WaSampleCChickenBitEnable:hsw */
+ wa_masked_en(wal,
+ HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
+
+ wa_masked_dis(wal,
+ CACHE_MODE_0_GEN7,
+ /* enable HiZ Raw Stall Optimization */
+ HIZ_RAW_STALL_OPT_DISABLE);
+
+ /* WaDisable4x2SubspanOptimization:hsw */
+ wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+ }
+
+ if (IS_VALLEYVIEW(i915)) {
+ /* WaDisableEarlyCull:vlv */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+ /*
+ * WaVSThreadDispatchOverride:ivb,vlv
+ *
+ * This actually overrides the dispatch
+ * mode for all thread types.
+ */
+ wa_write_clr_set(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN7_FF_SCHED_MASK,
+ GEN7_FF_TS_SCHED_HW |
+ GEN7_FF_VS_SCHED_HW |
+ GEN7_FF_DS_SCHED_HW);
+
+ /* WaPsdDispatchEnable:vlv */
+ /* WaDisablePSDDualDispatchEnable:vlv */
+ wa_masked_en(wal,
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_MAX_PS_THREAD_DEP |
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+ }
+
+ if (IS_IVYBRIDGE(i915)) {
+ /* WaDisableEarlyCull:ivb */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+ if (0) { /* causes HiZ corruption on ivb:gt1 */
+ /* enable HiZ Raw Stall Optimization */
+ wa_masked_dis(wal,
+ CACHE_MODE_0_GEN7,
+ HIZ_RAW_STALL_OPT_DISABLE);
+ }
+
+ /*
+ * WaVSThreadDispatchOverride:ivb,vlv
+ *
+ * This actually overrides the dispatch
+ * mode for all thread types.
+ */
+ wa_write_clr_set(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN7_FF_SCHED_MASK,
+ GEN7_FF_TS_SCHED_HW |
+ GEN7_FF_VS_SCHED_HW |
+ GEN7_FF_DS_SCHED_HW);
+
+ /* WaDisablePSDDualDispatchEnable:ivb */
+ if (IS_IVB_GT1(i915))
+ wa_masked_en(wal,
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+ }
+
+ if (IS_GEN(i915, 7)) {
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
wa_masked_en(wal,
GFX_MODE_GEN7,
GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
+ /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
+
+ /*
+ * BSpec says this must be set, even though
+ * WaDisable4x2SubspanOptimization:ivb,hsw
+ * WaDisable4x2SubspanOptimization isn't listed for VLV.
+ */
+ wa_masked_en(wal,
+ CACHE_MODE_1,
+ PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ wa_add(wal, GEN7_GT_MODE, 0,
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4),
+ GEN6_WIZ_HASHING_16x4);
+ }
+
if (IS_GEN_RANGE(i915, 6, 7))
/*
* We need to disable the AsyncFlip performance optimisations in
@@ -1991,6 +1964,39 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GFX_MODE,
GFX_TLB_INVALIDATE_EXPLICIT);
+ /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
+ wa_masked_en(wal,
+ _3D_CHICKEN,
+ _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
+
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ /* WaStripsFansDisableFastClipPerformanceFix:snb */
+ _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
+ /*
+ * Bspec says:
+ * "This bit must be set if 3DSTATE_CLIP clip mode is set
+ * to normal and 3DSTATE_SF number of SF output attributes
+ * is more than 16."
+ */
+ _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ wa_add(wal,
+ GEN6_GT_MODE, 0,
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+ GEN6_WIZ_HASHING_16x4);
+
+ /* WaDisable_RenderCache_OperationalFlush:snb */
+ wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
+
/*
* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
@@ -2067,39 +2073,6 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
wa_list_apply(engine->uncore, &engine->wa_list);
}
-static struct i915_vma *
-create_scratch(struct i915_address_space *vm, int count)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- unsigned int size;
- int err;
-
- size = round_up(count * sizeof(u32), PAGE_SIZE);
- obj = i915_gem_object_create_internal(vm->i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0,
- i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
- if (err)
- goto err_obj;
-
- return vma;
-
-err_obj:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
-}
-
struct mcr_range {
u32 start;
u32 end;
@@ -2202,7 +2175,8 @@ static int engine_wa_list_verify(struct intel_context *ce,
if (!wal->count)
return 0;
- vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
+ vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
+ wal->count * sizeof(u32));
if (IS_ERR(vma))
return PTR_ERR(vma);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 2f830017c51d..4b4f03b70df7 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -245,17 +245,6 @@ static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
GEM_BUG_ON(stalled);
}
-static void mark_eio(struct i915_request *rq)
-{
- if (i915_request_completed(rq))
- return;
-
- GEM_BUG_ON(i915_request_signaled(rq));
-
- i915_request_set_error_once(rq, -EIO);
- i915_request_mark_complete(rq);
-}
-
static void mock_reset_cancel(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
@@ -269,12 +258,12 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link)
- mark_eio(rq);
+ i915_request_mark_eio(rq);
intel_engine_signal_breadcrumbs(engine);
/* Cancel and submit all pending requests. */
list_for_each_entry(rq, &mock->hw_queue, mock.link) {
- mark_eio(rq);
+ i915_request_mark_eio(rq);
__i915_request_submit(rq);
}
INIT_LIST_HEAD(&mock->hw_queue);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 1f4020e906a8..db738d400168 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -25,7 +25,7 @@ static int request_sync(struct i915_request *rq)
/* Opencode i915_request_add() so we can keep the timeline locked. */
__i915_request_commit(rq);
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- __i915_request_queue(rq, NULL);
+ __i915_request_queue_bh(rq);
timeout = i915_request_wait(rq, 0, HZ / 10);
if (timeout < 0)
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 729c3c7b11e2..439c8984f5fa 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -6,6 +6,7 @@
#include <linux/sort.h>
+#include "intel_gpu_commands.h"
#include "intel_gt_pm.h"
#include "intel_rps.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index b88aa35ad75b..223ab88f7e57 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -197,6 +197,7 @@ static int cmp_u32(const void *_a, const void *_b)
static int __live_heartbeat_fast(struct intel_engine_cs *engine)
{
+ const unsigned int error_threshold = max(20000u, jiffies_to_usecs(6));
struct intel_context *ce;
struct i915_request *rq;
ktime_t t0, t1;
@@ -254,12 +255,18 @@ static int __live_heartbeat_fast(struct intel_engine_cs *engine)
times[0],
times[ARRAY_SIZE(times) - 1]);
- /* Min work delay is 2 * 2 (worst), +1 for scheduling, +1 for slack */
- if (times[ARRAY_SIZE(times) / 2] > jiffies_to_usecs(6)) {
+	/*
+	 * Ideally, the upper bound on min work delay would be something like
+	 * 2 * 2 (worst), +1 for scheduling, +1 for slack. In practice, we
+	 * are, even with system_highpri_wq, at the mercy of the CPU scheduler
+	 * and may be stuck behind some slow work for many milliseconds, such
+	 * as our very own display workers.
+	 */
+ if (times[ARRAY_SIZE(times) / 2] > error_threshold) {
pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n",
engine->name,
times[ARRAY_SIZE(times) / 2],
- jiffies_to_usecs(6));
+ error_threshold);
err = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index b08fc5390e8a..c3d965279fc3 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -4,13 +4,215 @@
* Copyright © 2018 Intel Corporation
*/
+#include <linux/sort.h>
+
#include "i915_selftest.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt_clock_utils.h"
#include "selftest_engine.h"
#include "selftest_engine_heartbeat.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_spinner.h"
+#define COUNT 5
+
+static int cmp_u64(const void *A, const void *B)
+{
+	const u64 *a = A, *b = B;
+
+	if (*a < *b)
+		return -1;
+	else if (*a > *b)
+		return 1;
+	else
+		return 0;
+}
+
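+/*
+ * Collapse the COUNT samples into a single value: sort them and take a
+ * weighted average of the middle three, discarding the min/max outliers.
+ */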
+static u64 trifilter(u64 *a)
+{
+ sort(a, COUNT, sizeof(*a), cmp_u64, NULL);
+ return (a[1] + 2 * a[2] + a[3]) >> 2;
+}
+
+static u32 *emit_wait(u32 *cs, u32 offset, int op, u32 value)
+{
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ op;
+ *cs++ = value;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *emit_store(u32 *cs, u32 offset, u32 value)
+{
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+}
+
+static u32 *emit_srm(u32 *cs, i915_reg_t reg, u32 offset)
+{
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(reg);
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static void write_semaphore(u32 *x, u32 value)
+{
+ WRITE_ONCE(*x, value);
+ wmb();
+}
+
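+/*
+ * Bracket a CPU-timed busywait with RING_TIMESTAMP/CTX_TIMESTAMP samples
+ * taken by the CS. sema[] sits at dword 1000 (byte 4000) of the status
+ * page: [0]/[4] hold the RING_TIMESTAMP samples, [1]/[3] the CTX_TIMESTAMP
+ * samples, and [2] is the semaphore used to start/stop the busywait.
+ */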
+static int __measure_timestamps(struct intel_context *ce,
+ u64 *dt, u64 *d_ring, u64 *d_ctx)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ u32 *sema = memset32(engine->status_page.addr + 1000, 0, 5);
+ u32 offset = i915_ggtt_offset(engine->status_page.vma);
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 28);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ /* Signal & wait for start */
+ cs = emit_store(cs, offset + 4008, 1);
+ cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_NEQ_SDD, 1);
+
+ cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4000);
+ cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4004);
+
+ /* Busy wait */
+ cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_EQ_SDD, 1);
+
+ cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4016);
+ cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4012);
+
+ intel_ring_advance(rq, cs);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ intel_engine_flush_submission(engine);
+
+ /* Wait for the request to start executing, that then waits for us */
+ while (READ_ONCE(sema[2]) == 0)
+ cpu_relax();
+
+	/* Run the request for 100us, sampling timestamps before/after */
+ preempt_disable();
+ *dt = local_clock();
+ write_semaphore(&sema[2], 0);
+ udelay(100);
+ *dt = local_clock() - *dt;
+ write_semaphore(&sema[2], 1);
+ preempt_enable();
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ return -ETIME;
+ }
+ i915_request_put(rq);
+
+ pr_debug("%s CTX_TIMESTAMP: [%x, %x], RING_TIMESTAMP: [%x, %x]\n",
+ engine->name, sema[1], sema[3], sema[0], sema[4]);
+
+ *d_ctx = sema[3] - sema[1];
+ *d_ring = sema[4] - sema[0];
+ return 0;
+}
+
+static int __live_engine_timestamps(struct intel_engine_cs *engine)
+{
+ u64 s_ring[COUNT], s_ctx[COUNT], st[COUNT], d_ring, d_ctx, dt;
+ struct intel_context *ce;
+ int i, err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < COUNT; i++) {
+ err = __measure_timestamps(ce, &st[i], &s_ring[i], &s_ctx[i]);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ dt = trifilter(st);
+ d_ring = trifilter(s_ring);
+ d_ctx = trifilter(s_ctx);
+
+ pr_info("%s elapsed:%lldns, CTX_TIMESTAMP:%lldns, RING_TIMESTAMP:%lldns\n",
+ engine->name, dt,
+ intel_gt_clock_interval_to_ns(engine->gt, d_ctx),
+ intel_gt_clock_interval_to_ns(engine->gt, d_ring));
+
+ d_ring = intel_gt_clock_interval_to_ns(engine->gt, d_ring);
+ if (3 * dt > 4 * d_ring || 4 * dt < 3 * d_ring) {
+ pr_err("%s Mismatch between ring timestamp and walltime!\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ d_ring = trifilter(s_ring);
+ d_ctx = trifilter(s_ctx);
+
+ d_ctx *= engine->gt->clock_frequency;
+ if (IS_ICELAKE(engine->i915))
+ d_ring *= 12500000; /* Fixed 80ns for icl ctx timestamp? */
+ else
+ d_ring *= engine->gt->clock_frequency;
+
+ if (3 * d_ctx > 4 * d_ring || 4 * d_ctx < 3 * d_ring) {
+ pr_err("%s Mismatch between ring and context timestamps!\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int live_engine_timestamps(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Check that CS_TIMESTAMP / CTX_TIMESTAMP are in sync, i.e. share
+ * the same CS clock.
+ */
+
+ if (INTEL_GEN(gt->i915) < 8)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ int err;
+
+ st_engine_heartbeat_disable(engine);
+ err = __live_engine_timestamps(engine);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int live_engine_busy_stats(void *arg)
{
struct intel_gt *gt = arg;
@@ -177,6 +379,7 @@ static int live_engine_pm(void *arg)
int live_engine_pm_selftests(struct intel_gt *gt)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_engine_timestamps),
SUBTEST(live_engine_busy_stats),
SUBTEST(live_engine_pm),
};
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
new file mode 100644
index 000000000000..264b5ebdb021
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -0,0 +1,4741 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_reset.h"
+#include "gt/selftest_engine_heartbeat.h"
+
+#include "i915_selftest.h"
+#include "selftests/i915_random.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_live_test.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/lib_sw_fence.h"
+
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+
+#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
+#define NUM_GPR 16
+#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
+
+static bool is_active(struct i915_request *rq)
+{
+ if (i915_request_is_active(rq))
+ return true;
+
+ if (i915_request_on_hold(rq))
+ return true;
+
+ if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
+ return true;
+
+ return false;
+}
+
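+/*
+ * Poll until the backend has picked up the request: either it has already
+ * completed, or the ELSP submission has been acknowledged and the request
+ * is active on the engine. Returns -ETIME if the timeout expires first.
+ */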
+static int wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ /* Ignore our own attempts to suppress excess tasklets */
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
+ timeout += jiffies;
+ do {
+ bool done = time_after(jiffies, timeout);
+
+ if (i915_request_completed(rq)) /* that was quick! */
+ return 0;
+
+		/* Wait until the HW has acknowledged the submission (or err) */
+ intel_engine_flush_submission(engine);
+ if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
+ return 0;
+
+ if (done)
+ return -ETIME;
+
+ cond_resched();
+ } while (1);
+}
+
+static int wait_for_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ timeout += jiffies;
+
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ continue;
+
+ if (i915_request_completed(rq))
+ break;
+
+ if (READ_ONCE(rq->fence.error))
+ break;
+ } while (time_before(jiffies, timeout));
+
+ flush_scheduled_work();
+
+ if (rq->fence.error != -EIO) {
+ pr_err("%s: hanging request %llx:%lld not reset\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -EINVAL;
+ }
+
+ /* Give the request a jiffie to complete after flushing the worker */
+ if (i915_request_wait(rq, 0,
+ max(0l, (long)(timeout - jiffies)) + 1) < 0) {
+ pr_err("%s: hanging request %llx:%lld did not complete\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int live_sanitycheck(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ctx;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ GEM_TRACE("spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out_ctx;
+ }
+
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ goto out_ctx;
+ }
+
+out_ctx:
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_unlite_restore(struct intel_gt *gt, int prio)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we can correctly context switch between 2 instances
+ * on the same engine from the same parent context.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return err;
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq[2];
+ struct igt_live_test t;
+ int n;
+
+ if (prio && !intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ /*
+ * Setup the pair of contexts such that if we
+ * lite-restore using the RING_TAIL from ce[1] it
+ * will execute garbage from ce[0]->ring.
+ */
+ memset(tmp->ring->vaddr,
+ POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
+ tmp->ring->vma->size);
+
+ ce[n] = tmp;
+ }
+ GEM_BUG_ON(!ce[1]->ring->size);
+ intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
+ lrc_update_regs(ce[1], engine, ce[1]->ring->head);
+
+ rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ goto err_ce;
+ }
+
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
+
+ if (!igt_wait_for_spinner(&spin, rq[0])) {
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ rq[1] = i915_request_create(ce[1]);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ if (!prio) {
+ /*
+ * Ensure we do the switch to ce[1] on completion.
+ *
+ * rq[0] is already submitted, so this should reduce
+ * to a no-op (a wait on a request on the same engine
+ * uses the submit fence, not the completion fence),
+ * but it will install a dependency on rq[1] for rq[0]
+ * that will prevent the pair being reordered by
+ * timeslicing.
+ */
+ i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ }
+
+ i915_request_get(rq[1]);
+ i915_request_add(rq[1]);
+ GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
+ i915_request_put(rq[0]);
+
+ if (prio) {
+ struct i915_sched_attr attr = {
+ .priority = prio,
+ };
+
+ /* Alternatively preempt the spinner with ce[1] */
+ engine->schedule(rq[1], &attr);
+ }
+
+ /* And switch back to ce[0] for good measure */
+ rq[0] = i915_request_create(ce[0]);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ i915_request_put(rq[1]);
+ goto err_ce;
+ }
+
+ i915_request_await_dma_fence(rq[0], &rq[1]->fence);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_unlite_switch(void *arg)
+{
+ return live_unlite_restore(arg, 0);
+}
+
+static int live_unlite_preempt(void *arg)
+{
+ return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
+}
+
+static int live_unlite_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Setup a preemption event that will cause almost the entire ring
+ * to be unwound, potentially fooling our intel_ring_direction()
+ * into emitting a forward lite-restore instead of the rollback.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int n;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ memset32(tmp->ring->vaddr,
+ 0xdeadbeef, /* trigger a hang if executed */
+ tmp->ring->vma->size / sizeof(u32));
+
+ ce[n] = tmp;
+ }
+
+ /* Create max prio spinner, followed by N low prio nops */
+ rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ i915_request_get(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+		/* Fill the ring until we cause a wrap */
+ n = 0;
+ while (intel_ring_direction(ce[0]->ring,
+ rq->wa_tail,
+ ce[0]->ring->tail) <= 0) {
+ struct i915_request *tmp;
+
+ tmp = intel_context_create_request(ce[0]);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ i915_request_put(rq);
+ goto err_ce;
+ }
+
+ i915_request_add(tmp);
+ intel_engine_flush_submission(engine);
+ n++;
+ }
+ intel_engine_flush_submission(engine);
+ pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+ engine->name, n,
+ ce[0]->ring->size,
+ ce[0]->ring->tail,
+ ce[0]->ring->emit,
+ rq->tail);
+ GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
+ rq->tail,
+ ce[0]->ring->tail) <= 0);
+ i915_request_put(rq);
+
+ /* Create a second ring to preempt the first ring after rq[0] */
+ rq = intel_context_create_request(ce[1]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ i915_request_put(rq);
+ if (err) {
+ pr_err("%s: preemption request was not submitted\n",
+ engine->name);
+ err = -ETIME;
+ }
+
+ pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+ engine->name,
+ ce[0]->ring->tail, ce[0]->ring->emit,
+ ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+ st_engine_heartbeat_enable(engine);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_pin_rewind(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * We have to be careful not to trust intel_ring too much, for example
+ * ring->head is updated upon retire which is out of sync with pinning
+ * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
+ * or else we risk writing an older, stale value.
+ *
+	 * To simulate this, let's apply a bit of deliberate sabotage.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct intel_ring *ring;
+ struct igt_live_test t;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ break;
+ }
+
+ /* Keep the context awake while we play games */
+ err = i915_active_acquire(&ce->active);
+ if (err) {
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ break;
+ }
+ ring = ce->ring;
+
+ /* Poison the ring, and offset the next request from HEAD */
+ memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
+ ring->emit = ring->size / 2;
+ ring->tail = ring->emit;
+ GEM_BUG_ON(ring->head);
+
+ intel_context_unpin(ce);
+
+ /* Submit a simple nop request */
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+ rq = intel_context_create_request(ce);
+ i915_active_release(&ce->active); /* e.g. async retire */
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+ GEM_BUG_ON(!rq->head);
+ i915_request_add(rq);
+
+ /* Expect not to hang! */
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int live_hold_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+	 * descendants are not executed while the capture is in progress.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ st_engine_heartbeat_disable(engine);
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out;
+ }
+
+ /* We have our request executing, now remove it and reset */
+
+ local_bh_disable();
+ if (test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags)) {
+ local_bh_enable();
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ i915_request_get(rq);
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ __intel_engine_reset_bh(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags);
+ local_bh_enable();
+
+ /* Check that we do not resubmit the held request */
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ i915_request_put(rq);
+ err = -EIO;
+ goto out;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+
+out:
+ st_engine_heartbeat_enable(engine);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static const char *error_repr(int err)
+{
+ return err ? "bad" : "good";
+}
+
+static int live_error_interrupt(void *arg)
+{
+ static const struct error_phase {
+ enum { GOOD = 0, BAD = -EIO } error[2];
+ } phases[] = {
+ { { BAD, GOOD } },
+ { { BAD, BAD } },
+ { { BAD, GOOD } },
+ { { GOOD, GOOD } }, /* sentinel */
+ };
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
+ * of invalid commands in user batches that will cause a GPU hang.
+ * This is a faster mechanism than using hangcheck/heartbeats, but
+ * only detects problems the HW knows about -- it will not warn when
+ * we kill the HW!
+ *
+ * To verify our detection and reset, we throw some invalid commands
+ * at the HW and wait for the interrupt.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ const struct error_phase *p;
+ int err = 0;
+
+ st_engine_heartbeat_disable(engine);
+
+ for (p = phases; p->error[0] != GOOD; p++) {
+ struct i915_request *client[ARRAY_SIZE(phases->error)];
+ u32 *cs;
+ int i;
+
+ memset(client, 0, sizeof(*client));
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ }
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out;
+ }
+
+ if (p->error[i]) {
+ *cs++ = 0xdeadbeef;
+ *cs++ = 0xdeadbeef;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ client[i] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
+ err = wait_for_submit(engine, client[0], HZ / 2);
+ if (err) {
+ pr_err("%s: first request did not start within time!\n",
+ engine->name);
+ err = -ETIME;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (i915_request_wait(client[i], 0, HZ / 5) < 0)
+ pr_debug("%s: %s request incomplete!\n",
+ engine->name,
+ error_repr(p->error[i]));
+
+ if (!i915_request_started(client[i])) {
+ pr_err("%s: %s request not started!\n",
+ engine->name,
+ error_repr(p->error[i]));
+ err = -ETIME;
+ goto out;
+ }
+
+ /* Kick the tasklet to process the error */
+ intel_engine_flush_submission(engine);
+ if (client[i]->fence.error != p->error[i]) {
+ pr_err("%s: %s request (%s) with wrong error code: %d\n",
+ engine->name,
+ error_repr(p->error[i]),
+ i915_request_completed(client[i]) ? "completed" : "running",
+ client[i]->fence.error);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ if (client[i])
+ i915_request_put(client[i]);
+ if (err) {
+ pr_err("%s: failed at phase[%zd] { %d, %d }\n",
+ engine->name, p - phases,
+ p->error[0], p->error[1]);
+ break;
+ }
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (err) {
+ intel_gt_set_wedged(gt);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int
+emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 10);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma) + 4 * idx;
+ *cs++ = 0;
+
+ if (idx > 0) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ intel_ring_advance(rq, cs);
+ return 0;
+}
+
+static struct i915_request *
+semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ goto out_ce;
+
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb)
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err == 0)
+ err = emit_semaphore_chain(rq, vma, idx);
+ if (err == 0)
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err)
+ rq = ERR_PTR(err);
+
+out_ce:
+ intel_context_put(ce);
+ return rq;
+}
+
+static int
+release_queue(struct intel_engine_cs *engine,
+ struct i915_vma *vma,
+ int idx, int prio)
+{
+ struct i915_sched_attr attr = {
+ .priority = prio,
+ };
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ local_bh_disable();
+ engine->schedule(rq, &attr);
+ local_bh_enable(); /* kick tasklet */
+
+ i915_request_put(rq);
+
+ return 0;
+}
+
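+/*
+ * Build a chain of requests across all engines, each blocked on its own
+ * semaphore and releasing the previous one when it finally runs. The chain
+ * is released from the tail by release_queue(), so draining it requires the
+ * scheduler to timeslice between the queued contexts on each engine.
+ */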
+static int
+slice_semaphore_queue(struct intel_engine_cs *outer,
+ struct i915_vma *vma,
+ int count)
+{
+ struct intel_engine_cs *engine;
+ struct i915_request *head;
+ enum intel_engine_id id;
+ int err, i, n = 0;
+
+ head = semaphore_queue(outer, vma, n++);
+ if (IS_ERR(head))
+ return PTR_ERR(head);
+
+ for_each_engine(engine, outer->gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ for (i = 0; i < count; i++) {
+ struct i915_request *rq;
+
+ rq = semaphore_queue(engine, vma, n++);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_put(rq);
+ }
+ }
+
+ err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
+ if (err)
+ goto out;
+
+ if (i915_request_wait(head, 0,
+ 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
+ pr_err("%s: Failed to slice along semaphore chain of length (%d, %d)!\n",
+ outer->name, count, n);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(outer->gt);
+ err = -EIO;
+ }
+
+out:
+ i915_request_put(head);
+ return err;
+}
+
+static int live_timeslice_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+
+ /*
+ * If a request takes too long, we would like to give other users
+ * a fair go on the GPU. In particular, users may create batches
+ * that wait upon external input, where that input may even be
+ * supplied by another GPU job. To avoid blocking forever, we
+ * need to preempt the current task and replace it with another
+ * ready task.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
+ for_each_engine(engine, gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ memset(vaddr, 0, PAGE_SIZE);
+
+ st_engine_heartbeat_disable(engine);
+ err = slice_semaphore_queue(engine, vma, 5);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ goto err_pin;
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ goto err_pin;
+ }
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
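+/*
+ * Each rewinder request waits for the shared slot[0] to reach its idx,
+ * records RING_TIMESTAMP into slot[idx] and then bumps slot[0] to idx + 1,
+ * so the slot[] array ends up recording the order of actual execution.
+ */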
+static struct i915_request *
+create_rewinder(struct intel_context *ce,
+ struct i915_request *wait,
+ void *slot, int idx)
+{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ if (wait) {
+ err = i915_request_await_dma_fence(rq, &wait->fence);
+ if (err)
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 14);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = idx;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = offset + idx * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = idx + 1;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_MASK;
+ err = 0;
+err:
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static int live_timeslice_rewind(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+	 * The usual presumption on timeslice expiration is that we replace
+	 * the active context with another. However, given a chain of
+	 * dependencies we may end up replacing the context with itself,
+	 * but resubmitting only a few of its requests, forcing us to rewind
+	 * the RING_TAIL of the original request.
+	 */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ enum { A1, A2, B1 };
+ enum { X = 1, Z, Y };
+ struct i915_request *rq[3] = {};
+ struct intel_context *ce;
+ unsigned long timeslice;
+ int i, err = 0;
+ u32 *slot;
+
+ if (!intel_engine_has_timeslices(engine))
+ continue;
+
+ /*
+ * A:rq1 -- semaphore wait, timestamp X
+ * A:rq2 -- write timestamp Y
+ *
+ * B:rq1 [await A:rq1] -- write timestamp Z
+ *
+ * Force timeslice, release semaphore.
+ *
+ * Expect execution/evaluation order XZY
+ */
+
+ st_engine_heartbeat_disable(engine);
+ timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+ slot = memset32(engine->status_page.addr + 1000, 0, 4);
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[A1] = create_rewinder(ce, NULL, slot, X);
+ if (IS_ERR(rq[A1])) {
+ intel_context_put(ce);
+ goto err;
+ }
+
+ rq[A2] = create_rewinder(ce, NULL, slot, Y);
+ intel_context_put(ce);
+ if (IS_ERR(rq[A2]))
+ goto err;
+
+ err = wait_for_submit(engine, rq[A2], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit first context\n",
+ engine->name);
+ goto err;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
+ intel_context_put(ce);
+ if (IS_ERR(rq[2]))
+ goto err;
+
+ err = wait_for_submit(engine, rq[B1], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit second context\n",
+ engine->name);
+ goto err;
+ }
+
+ /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
+ ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
+ while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
+ /* Wait for the timeslice to kick in */
+ del_timer(&engine->execlists.timer);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ intel_engine_flush_submission(engine);
+ }
+ /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
+ GEM_BUG_ON(!i915_request_is_active(rq[A1]));
+ GEM_BUG_ON(!i915_request_is_active(rq[B1]));
+ GEM_BUG_ON(i915_request_is_active(rq[A2]));
+
+ /* Release the hounds! */
+ slot[0] = 1;
+ wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
+
+ for (i = 1; i <= 3; i++) {
+ unsigned long timeout = jiffies + HZ / 2;
+
+ while (!READ_ONCE(slot[i]) &&
+ time_before(jiffies, timeout))
+ ;
+
+ if (!time_before(jiffies, timeout)) {
+ pr_err("%s: rq[%d] timed out\n",
+ engine->name, i - 1);
+ err = -ETIME;
+ goto err;
+ }
+
+ pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
+ }
+
+ /* XZY: XZ < XY */
+ if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
+ pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
+ engine->name,
+ slot[Z] - slot[X],
+ slot[Y] - slot[X]);
+ err = -EINVAL;
+ }
+
+err:
+ memset32(&slot[0], -1, 4);
+ wmb();
+
+ engine->props.timeslice_duration_ms = timeslice;
+ st_engine_heartbeat_enable(engine);
+ for (i = 0; i < 3; i++)
+ i915_request_put(rq[i]);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
+}
+
+static long slice_timeout(struct intel_engine_cs *engine)
+{
+ long timeout;
+
+ /* Enough time for a timeslice to kick in, and kick out */
+ timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
+
+ /* Enough time for the nop request to complete */
+ timeout += HZ / 5;
+
+ return timeout + 1;
+}
+
+static int live_timeslice_queue(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+
+ /*
+	 * Make sure that even if ELSP[0] and ELSP[1] are filled, with
+	 * timeslicing between them disabled, we *do* enable timeslicing
+	 * if the queue demands it. (Normally, we do not submit if
+ * ELSP[1] is already occupied, so must rely on timeslicing to
+ * eject ELSP[0] in favour of the queue.)
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ };
+ struct i915_request *rq, *nop;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+ memset(vaddr, 0, PAGE_SIZE);
+
+ /* ELSP[0]: semaphore wait */
+ rq = semaphore_queue(engine, vma, 0);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_heartbeat;
+ }
+ engine->schedule(rq, &attr);
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err) {
+ pr_err("%s: Timed out trying to submit semaphores\n",
+ engine->name);
+ goto err_rq;
+ }
+
+ /* ELSP[1]: nop request */
+ nop = nop_request(engine);
+ if (IS_ERR(nop)) {
+ err = PTR_ERR(nop);
+ goto err_rq;
+ }
+ err = wait_for_submit(engine, nop, HZ / 2);
+ i915_request_put(nop);
+ if (err) {
+ pr_err("%s: Timed out trying to submit nop\n",
+ engine->name);
+ goto err_rq;
+ }
+
+ GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+		/* Queue: semaphore signal, at the same priority as the semaphore */
+ err = release_queue(engine, vma, 1, effective_prio(rq));
+ if (err)
+ goto err_rq;
+
+ /* Wait until we ack the release_queue and start timeslicing */
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+ } while (READ_ONCE(engine->execlists.pending[0]));
+
+ /* Timeslice every jiffy, so within 2 we should signal */
+ if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to timeslice into queue\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ memset(vaddr, 0xff, PAGE_SIZE);
+ err = -EIO;
+ }
+err_rq:
+ i915_request_put(rq);
+err_heartbeat:
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int live_timeslice_nopreempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * We should not timeslice into a request that is marked with
+ * I915_REQUEST_NOPREEMPT.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+ unsigned long timeslice;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ st_engine_heartbeat_disable(engine);
+ timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+ /* Create an unpreemptible spinner */
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto out_spin;
+ }
+
+ set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
+ i915_request_put(rq);
+
+ /* Followed by a maximum priority barrier (heartbeat) */
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_spin;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ /*
+ * Wait until the barrier is in ELSP, and we know timeslicing
+ * will have been activated.
+ */
+ if (wait_for_submit(engine, rq, HZ / 2)) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto out_spin;
+ }
+
+ /*
+ * Since the ELSP[0] request is unpreemptible, it should not
+ * allow the maximum priority barrier through. Wait long
+ * enough to see if it is timesliced in by mistake.
+ */
+ if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
+ pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
+ engine->name);
+ err = -EINVAL;
+ }
+ i915_request_put(rq);
+
+out_spin:
+ igt_spinner_end(&spin);
+out_heartbeat:
+ xchg(&engine->props.timeslice_duration_ms, timeslice);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_busywait_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+ u32 *map;
+
+ /*
+ * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
+ * preempt the busywaits used to synchronise between rings.
+ */
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ return -ENOMEM;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ctx_lo;
+ }
+
+ map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_map;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_vma;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *lo, *hi;
+ struct igt_live_test t;
+ u32 *cs;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_vma;
+ }
+
+ /*
+ * We create two requests. The low priority request
+ * busywaits on a semaphore (inside the ringbuffer where
+ * it should be preemptible) and the high priority request
+ * uses an MI_STORE_DWORD_IMM to update the semaphore value
+ * allowing the first request to complete. If preemption
+ * fails, we hang instead.
+ */
+
+ lo = igt_request_alloc(ctx_lo, engine);
+ if (IS_ERR(lo)) {
+ err = PTR_ERR(lo);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(lo, 8);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(lo);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ /* XXX Do we need a flush + invalidate here? */
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+
+ intel_ring_advance(lo, cs);
+
+ i915_request_get(lo);
+ i915_request_add(lo);
+
+ if (wait_for(READ_ONCE(*map), 10)) {
+ i915_request_put(lo);
+ err = -ETIMEDOUT;
+ goto err_vma;
+ }
+
+ /* Low priority request should be busywaiting now */
+ if (i915_request_wait(lo, 0, 1) != -ETIME) {
+ i915_request_put(lo);
+ pr_err("%s: Busywaiting request did not!\n",
+ engine->name);
+ err = -EIO;
+ goto err_vma;
+ }
+
+ hi = igt_request_alloc(ctx_hi, engine);
+ if (IS_ERR(hi)) {
+ err = PTR_ERR(hi);
+ i915_request_put(lo);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(hi, 4);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(hi);
+ i915_request_put(lo);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 0;
+
+ intel_ring_advance(hi, cs);
+ i915_request_add(hi);
+
+ if (i915_request_wait(lo, 0, HZ / 5) < 0) {
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to preempt semaphore busywait!\n",
+ engine->name);
+
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ GEM_TRACE_DUMP();
+
+ i915_request_put(lo);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_vma;
+ }
+ GEM_BUG_ON(READ_ONCE(*map));
+ i915_request_put(lo);
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_vma;
+ }
+ }
+
+ err = 0;
+err_vma:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+ return err;
+}
+
+static struct i915_request *
+spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arb)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = igt_spinner_create_request(spin, ce, arb);
+ intel_context_put(ce);
+ return rq;
+}
+
+static int live_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_hi, spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_spin_hi;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ for_each_engine(engine, gt, id) {
+ struct igt_live_test t;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ GEM_TRACE("lo spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
+ GEM_TRACE("hi spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
+err_spin_hi:
+ igt_spinner_fini(&spin_hi);
+ return err;
+}
+
+static int live_late_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_hi, spin_lo;
+ struct intel_engine_cs *engine;
+ struct i915_sched_attr attr = {};
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_spin_hi;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+
+ /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
+ ctx_lo->sched.priority = I915_USER_PRIORITY(1);
+
+ for_each_engine(engine, gt, id) {
+ struct igt_live_test t;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ pr_err("First context failed to start\n");
+ goto err_wedged;
+ }
+
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (igt_wait_for_spinner(&spin_hi, rq)) {
+ pr_err("Second context overtook first?\n");
+ goto err_wedged;
+ }
+
+ attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+ engine->schedule(rq, &attr);
+
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
+ pr_err("High priority context failed to preempt the low priority context\n");
+ GEM_TRACE_DUMP();
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
+err_spin_hi:
+ igt_spinner_fini(&spin_hi);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+}
+
+struct preempt_client {
+ struct igt_spinner spin;
+ struct i915_gem_context *ctx;
+};
+
+static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
+{
+ c->ctx = kernel_context(gt->i915);
+ if (!c->ctx)
+ return -ENOMEM;
+
+ if (igt_spinner_init(&c->spin, gt))
+ goto err_ctx;
+
+ return 0;
+
+err_ctx:
+ kernel_context_close(c->ctx);
+ return -ENOMEM;
+}
+
+static void preempt_client_fini(struct preempt_client *c)
+{
+ igt_spinner_fini(&c->spin);
+ kernel_context_close(c->ctx);
+}
+
+static int live_nopreempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Verify that we can disable preemption for an individual request
+ * that may be being observed and does not want to be interrupted.
+ */
+
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
+ goto err_client_a;
+ b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq_a, *rq_b;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ engine->execlists.preempt_hang.count = 0;
+
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_a)) {
+ err = PTR_ERR(rq_a);
+ goto err_client_b;
+ }
+
+ /* Low priority client, but unpreemptable! */
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
+
+ i915_request_add(rq_a);
+ if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+ pr_err("First client failed to start\n");
+ goto err_wedged;
+ }
+
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_b)) {
+ err = PTR_ERR(rq_b);
+ goto err_client_b;
+ }
+
+ i915_request_add(rq_b);
+
+ /* B is much more important than A! (But A is unpreemptable.) */
+ GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
+
+ /* Wait long enough for preemption and timeslicing */
+ if (igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client started too early!\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&a.spin);
+
+ if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client failed to start\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&b.spin);
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("Preemption recorded x%d; should have been suppressed!\n",
+ engine->execlists.preempt_hang.count);
+ err = -EINVAL;
+ goto err_wedged;
+ }
+
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&b);
+err_client_a:
+ preempt_client_fini(&a);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&b.spin);
+ igt_spinner_end(&a.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_b;
+}
+
+struct live_preempt_cancel {
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+};
+
+static int __cancel_active0(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP0 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ intel_context_set_banned(rq->context);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_active1(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[2] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP1 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
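+ /*
+ * A spinner built with MI_NOOP has no arbitration point inside its
+ * loop and so cannot be preempted once running; MI_ARB_CHECK (used
+ * for rq[1] below) does allow preemption.
+ */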
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* no preemption */
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ rq[1] = spinner_create_request(&arg->b.spin,
+ arg->b.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ intel_context_set_banned(rq[1]->context);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ igt_spinner_end(&arg->a.spin);
+ err = wait_for_reset(arg->engine, rq[1], HZ / 2);
+ if (err)
+ goto out;
+
+ if (rq[0]->fence.error != 0) {
+ pr_err("Normal inflight0 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != -EIO) {
+ pr_err("Cancelled inflight1 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_queued(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[3] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Full ELSP and one in the wings */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ rq[2] = spinner_create_request(&arg->b.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[2])) {
+ err = PTR_ERR(rq[2]);
+ goto out;
+ }
+
+ i915_request_get(rq[2]);
+ err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
+ i915_request_add(rq[2]);
+ if (err)
+ goto out;
+
+ intel_context_set_banned(rq[2]->context);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ err = wait_for_reset(arg->engine, rq[2], HZ / 2);
+ if (err)
+ goto out;
+
+ if (rq[0]->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != 0) {
+ pr_err("Normal inflight1 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[2]->fence.error != -EIO) {
+ pr_err("Cancelled queued request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[2]);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_hostile(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ int err;
+
+ /* Preempt cancel non-preemptible spinner in ELSP0 */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!intel_has_reset_engine(arg->engine->gt))
+ return 0;
+
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ intel_context_set_banned(rq->context);
+ err = intel_engine_pulse(arg->engine); /* force reset */
+ if (err)
+ goto out;
+
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_flush_test(arg->engine->i915))
+ err = -EIO;
+ return err;
+}
+
+static void force_reset_timeout(struct intel_engine_cs *engine)
+{
+ engine->reset_timeout.probability = 999;
+ atomic_set(&engine->reset_timeout.times, -1);
+}
+
+static void cancel_reset_timeout(struct intel_engine_cs *engine)
+{
+ memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
+}
+
+static int __cancel_fail(struct live_preempt_cancel *arg)
+{
+ struct intel_engine_cs *engine = arg->engine;
+ struct i915_request *rq;
+ int err;
+
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!intel_has_reset_engine(engine->gt))
+ return 0;
+
+ GEM_TRACE("%s(%s)\n", __func__, engine->name);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ intel_context_set_banned(rq->context);
+
+ err = intel_engine_pulse(engine);
+ if (err)
+ goto out;
+
+ force_reset_timeout(engine);
+
+ /* force preempt reset [failure] */
+ while (!engine->execlists.pending[0])
+ intel_engine_flush_submission(engine);
+ del_timer_sync(&engine->execlists.preempt);
+ intel_engine_flush_submission(engine);
+
+ cancel_reset_timeout(engine);
+
+ /* after failure, require heartbeats to reset device */
+ intel_engine_set_heartbeat(engine, 1);
+ err = wait_for_reset(engine, rq, HZ / 2);
+ intel_engine_set_heartbeat(engine,
+ engine->defaults.heartbeat_interval_ms);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_flush_test(engine->i915))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_cancel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct live_preempt_cancel data;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * To cancel an inflight context, we need to first remove it from the
+ * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
+ */
+
+ if (preempt_client_init(gt, &data.a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &data.b))
+ goto err_client_a;
+
+ for_each_engine(data.engine, gt, id) {
+ if (!intel_engine_has_preemption(data.engine))
+ continue;
+
+ err = __cancel_active0(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_active1(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_queued(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_hostile(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_fail(&data);
+ if (err)
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&data.b);
+err_client_a:
+ preempt_client_fini(&data.a);
+ return err;
+
+err_wedged:
+ GEM_TRACE_DUMP();
+ igt_spinner_end(&data.b.spin);
+ igt_spinner_end(&data.a.spin);
+ intel_gt_set_wedged(gt);
+ goto err_client_b;
+}
+
+static int live_suppress_self_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
+ };
+ struct preempt_client a, b;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Verify that if a preemption request does not cause a change in
+ * the current execution order, the preempt-to-idle injection is
+ * skipped and that we do not accidentally apply it after the CS
+ * completion event.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0; /* presume black box */
+
+ if (intel_vgpu_active(gt->i915))
+ return 0; /* GVT forces single port & request submission */
+
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
+ goto err_client_a;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq_a, *rq_b;
+ int depth;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+
+ st_engine_heartbeat_disable(engine);
+ engine->execlists.preempt_hang.count = 0;
+
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq_a)) {
+ err = PTR_ERR(rq_a);
+ st_engine_heartbeat_enable(engine);
+ goto err_client_b;
+ }
+
+ i915_request_add(rq_a);
+ if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+ pr_err("First client failed to start\n");
+ st_engine_heartbeat_enable(engine);
+ goto err_wedged;
+ }
+
+ /* Keep postponing the timer to avoid premature slicing */
+ mod_timer(&engine->execlists.timer, jiffies + HZ);
+ for (depth = 0; depth < 8; depth++) {
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq_b)) {
+ err = PTR_ERR(rq_b);
+ st_engine_heartbeat_enable(engine);
+ goto err_client_b;
+ }
+ i915_request_add(rq_b);
+
+ GEM_BUG_ON(i915_request_completed(rq_a));
+ engine->schedule(rq_a, &attr);
+ igt_spinner_end(&a.spin);
+
+ if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client failed to start\n");
+ st_engine_heartbeat_enable(engine);
+ goto err_wedged;
+ }
+
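+ /* Swap roles so the spinner that just started is the one bumped next */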
+ swap(a, b);
+ rq_a = rq_b;
+ }
+ igt_spinner_end(&a.spin);
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
+ engine->name,
+ engine->execlists.preempt_hang.count,
+ depth);
+ st_engine_heartbeat_enable(engine);
+ err = -EINVAL;
+ goto err_client_b;
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&b);
+err_client_a:
+ preempt_client_fini(&a);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&b.spin);
+ igt_spinner_end(&a.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_b;
+}
+
+static int live_chain_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client hi, lo;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Build a chain AB...BA between two contexts (A, B) and request
+ * preemption of the last request. It should then complete before
+ * the previously submitted spinner in B.
+ */
+
+ if (preempt_client_init(gt, &hi))
+ return -ENOMEM;
+
+ if (preempt_client_init(gt, &lo))
+ goto err_client_hi;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ };
+ struct igt_live_test t;
+ struct i915_request *rq;
+ int ring_size, count, i;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
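+ /* Estimate how many requests of this size fit within the ring */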
+ ring_size = rq->wa_tail - rq->head;
+ if (ring_size < 0)
+ ring_size += rq->ring->size;
+ ring_size = rq->ring->size / ring_size;
+ pr_debug("%s(%s): Using maximum of %d requests\n",
+ __func__, engine->name, ring_size);
+
+ igt_spinner_end(&lo.spin);
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ pr_err("Timed out waiting to flush %s\n", engine->name);
+ i915_request_put(rq);
+ goto err_wedged;
+ }
+ i915_request_put(rq);
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_wedged;
+ }
+
+ for_each_prime_number_from(count, 1, ring_size) {
+ rq = spinner_create_request(&hi.spin,
+ hi.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&hi.spin, rq))
+ goto err_wedged;
+
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+
+ for (i = 0; i < count; i++) {
+ rq = igt_request_alloc(lo.ctx, engine);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ }
+
+ rq = igt_request_alloc(hi.ctx, engine);
+ if (IS_ERR(rq))
+ goto err_wedged;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ engine->schedule(rq, &attr);
+
+ igt_spinner_end(&hi.spin);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("Failed to preempt over chain of %d\n",
+ count);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ i915_request_put(rq);
+ goto err_wedged;
+ }
+ igt_spinner_end(&lo.spin);
+ i915_request_put(rq);
+
+ rq = igt_request_alloc(lo.ctx, engine);
+ if (IS_ERR(rq))
+ goto err_wedged;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("Failed to flush low priority chain of %d requests\n",
+ count);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_request_put(rq);
+ goto err_wedged;
+ }
+ i915_request_put(rq);
+ }
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_wedged;
+ }
+ }
+
+ err = 0;
+err_client_lo:
+ preempt_client_fini(&lo);
+err_client_hi:
+ preempt_client_fini(&hi);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&hi.spin);
+ igt_spinner_end(&lo.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_lo;
+}
+
+static int create_gang(struct intel_engine_cs *engine,
+ struct i915_request **prev)
+{
+ struct drm_i915_gem_object *obj;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ce;
+ }
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_obj;
+ }
+
+ /* Semaphore target: spin until zero */
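+ /*
+ * The wait polls the first dword of this batch (initially the
+ * MI_ARB_ON_OFF below, so non-zero); the gang member created after
+ * this one, or the CPU for the final batch, writes 0 there to
+ * release it.
+ */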
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = lower_32_bits(vma->node.start);
+ *cs++ = upper_32_bits(vma->node.start);
+
+ if (*prev) {
+ u64 offset = (*prev)->batch->node.start;
+
+ /* Terminate the spinner in the next lower priority batch. */
+ *cs++ = MI_STORE_DWORD_IMM_GEN4;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = 0;
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_obj;
+ }
+
+ rq->batch = i915_vma_get(vma);
+ i915_request_get(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ i915_request_add(rq);
+ if (err)
+ goto err_rq;
+
+ i915_gem_object_put(obj);
+ intel_context_put(ce);
+
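+ /*
+ * Chain the gang through the selftest-only mock.link so the caller
+ * can walk the requests from last (highest priority) to first; for
+ * the oldest request the link decodes back to NULL and terminates
+ * the walk.
+ */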
+ rq->mock.link.next = &(*prev)->mock.link;
+ *prev = rq;
+ return 0;
+
+err_rq:
+ i915_vma_put(rq->batch);
+ i915_request_put(rq);
+err_obj:
+ i915_gem_object_put(obj);
+err_ce:
+ intel_context_put(ce);
+ return err;
+}
+
+static int __live_preempt_ring(struct intel_engine_cs *engine,
+ struct igt_spinner *spin,
+ int queue_sz, int ring_sz)
+{
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err = 0;
+ int n;
+
+ if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
+ return -EIO;
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ tmp->ring = __intel_context_ring_size(ring_sz);
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ memset32(tmp->ring->vaddr,
+ 0xdeadbeef, /* trigger a hang if executed */
+ tmp->ring->vma->size / sizeof(u32));
+
+ ce[n] = tmp;
+ }
+
+ rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ i915_request_get(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(spin, rq)) {
+ intel_gt_set_wedged(engine->gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ /* Fill the ring until we cause a wrap */
+ n = 0;
+ while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
+ struct i915_request *tmp;
+
+ tmp = intel_context_create_request(ce[0]);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ i915_request_put(rq);
+ goto err_ce;
+ }
+
+ i915_request_add(tmp);
+ intel_engine_flush_submission(engine);
+ n++;
+ }
+ intel_engine_flush_submission(engine);
+ pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+ engine->name, queue_sz, n,
+ ce[0]->ring->size,
+ ce[0]->ring->tail,
+ ce[0]->ring->emit,
+ rq->tail);
+ i915_request_put(rq);
+
+ /* Create a second request to preempt the first ring */
+ rq = intel_context_create_request(ce[1]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ i915_request_put(rq);
+ if (err) {
+ pr_err("%s: preemption request was not submited\n",
+ engine->name);
+ err = -ETIME;
+ }
+
+ pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+ engine->name,
+ ce[0]->ring->tail, ce[0]->ring->emit,
+ ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that we rollback large chunks of a ring in order to do a
+ * preemption event. Similar to live_unlite_ring, but looking at
+ * ring size rather than the impact of intel_ring_direction().
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ int n;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n <= 3; n++) {
+ err = __live_preempt_ring(engine, &spin,
+ n * SZ_4K / 4, SZ_4K);
+ if (err)
+ break;
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_preempt_gang(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Build as long a chain of preempters as we can, with each
+ * request higher priority than the last. Once we are ready, we release
+ * the last batch, which then percolates down the chain, each releasing
+ * the next oldest in turn. The intent is to simply push as hard as we
+ * can with the number of preemptions, trying to exceed narrow HW
+ * limits. At a minimum, we insist that we can sort all the user
+ * high priority levels into execution order.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq = NULL;
+ struct igt_live_test t;
+ IGT_TIMEOUT(end_time);
+ int prio = 0;
+ int err = 0;
+ u32 *cs;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
+ return -EIO;
+
+ do {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(prio++),
+ };
+
+ err = create_gang(engine, &rq);
+ if (err)
+ break;
+
+ /* Submit each spinner at increasing priority */
+ engine->schedule(rq, &attr);
+ } while (prio <= I915_PRIORITY_MAX &&
+ !__igt_timeout(end_time, NULL));
+ pr_debug("%s: Preempt chain of %d requests\n",
+ engine->name, prio);
+
+ /*
+ * Such that the last spinner is the highest priority and
+ * should execute first. When that spinner completes,
+ * it will terminate the next lowest spinner until there
+ * are no more spinners and the gang is complete.
+ */
+ cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+ if (!IS_ERR(cs)) {
+ *cs = 0;
+ i915_gem_object_unpin_map(rq->batch->obj);
+ } else {
+ err = PTR_ERR(cs);
+ intel_gt_set_wedged(gt);
+ }
+
+ while (rq) { /* wait for each rq from highest to lowest prio */
+ struct i915_request *n = list_next_entry(rq, mock.link);
+
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("Failed to flush chain of %d requests, at %d\n",
+ prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -ETIME;
+ }
+
+ i915_vma_put(rq->batch);
+ i915_request_put(rq);
+ rq = n;
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_vma *
+create_gpr_user(struct intel_engine_cs *engine,
+ struct i915_vma *result,
+ unsigned int offset)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+ int i;
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, result->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(vma);
+ return ERR_CAST(cs);
+ }
+
+ /* All GPRs are clear for new contexts. We use GPR(0) as a constant */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, 0);
+ *cs++ = 1;
+
+ for (i = 1; i < NUM_GPR; i++) {
+ u64 addr;
+
+ /*
+ * Perform: GPR[i]++
+ *
+ * As we read and write into the context saved GPR[i], if
+ * we restart this batch buffer from an earlier point, we
+ * will repeat the increment and store a value > 1.
+ */
+ *cs++ = MI_MATH(4);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
+ *cs++ = MI_MATH_ADD;
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
+
+ addr = result->node.start + offset + i * sizeof(*cs);
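+ /* Each 64b GPR is a pair of 32b registers; 2 * i is its low dword */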
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = CS_GPR(engine, 2 * i);
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
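+ /*
+ * Stall until preempt_user() advances the semaphore in the first
+ * dword of the result buffer to at least i, giving ample
+ * opportunity to be preempted between increments.
+ */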
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = i;
+ *cs++ = lower_32_bits(result->node.start);
+ *cs++ = upper_32_bits(result->node.start);
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ return vma;
+}
+
+static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, sz);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_ggtt_pin(vma, NULL, 0, 0);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static struct i915_request *
+create_gpr_client(struct intel_engine_cs *engine,
+ struct i915_vma *global,
+ unsigned int offset)
+{
+ struct i915_vma *batch, *vma;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ vma = i915_vma_instance(global->obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_ce;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_ce;
+
+ batch = create_gpr_user(engine, vma, offset);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_vma;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_batch;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+
+ i915_vma_lock(batch);
+ if (!err)
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(batch);
+ i915_vma_unpin(batch);
+
+ if (!err)
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+out_batch:
+ i915_vma_put(batch);
+out_vma:
+ i915_vma_unpin(vma);
+out_ce:
+ intel_context_put(ce);
+ return err ? ERR_PTR(err) : rq;
+}
+
+static int preempt_user(struct intel_engine_cs *engine,
+ struct i915_vma *global,
+ int id)
+{
+ struct i915_sched_attr attr = {
+ .priority = I915_PRIORITY_MAX
+ };
+ struct i915_request *rq;
+ int err = 0;
+ u32 *cs;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(global);
+ *cs++ = 0;
+ *cs++ = id;
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ engine->schedule(rq, &attr);
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int live_preempt_user(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_vma *global;
+ enum intel_engine_id id;
+ u32 *result;
+ int err = 0;
+
+ /*
+ * In our other tests, we look at preemption in carefully
+ * controlled conditions in the ringbuffer. Since most of the
+ * time is spent in user batches, most of our preemptions naturally
+ * occur there. We want to verify that when we preempt inside a batch
+ * we continue on from the current instruction and do not roll back
+ * to the start, or another earlier arbitration point.
+ *
+ * To verify this, we create a batch which is a mixture of
+ * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with
+ * a few preempting contexts thrown into the mix, we look for any
+ * repeated instructions (which show up as incorrect values).
+ */
+
+ global = create_global(gt, 4096);
+ if (IS_ERR(global))
+ return PTR_ERR(global);
+
+ result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
+ if (IS_ERR(result)) {
+ i915_vma_unpin_and_release(&global, 0);
+ return PTR_ERR(result);
+ }
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *client[3] = {};
+ struct igt_live_test t;
+ int i;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
+ continue; /* we need per-context GPR */
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ memset(result, 0, 4096);
+
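+ /*
+ * Each client owns a NUM_GPR dword slice of the result buffer;
+ * dword 0 doubles as the semaphore that preempt_user() advances.
+ */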
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_request *rq;
+
+ rq = create_gpr_client(engine, global,
+ NUM_GPR * i * sizeof(u32));
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end_test;
+ }
+
+ client[i] = rq;
+ }
+
+ /* Continuously preempt the set of 3 running contexts */
+ for (i = 1; i <= NUM_GPR; i++) {
+ err = preempt_user(engine, global, i);
+ if (err)
+ goto end_test;
+ }
+
+ if (READ_ONCE(result[0]) != NUM_GPR) {
+ pr_err("%s: Failed to release semaphore\n",
+ engine->name);
+ err = -EIO;
+ goto end_test;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ int gpr;
+
+ if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
+ err = -ETIME;
+ goto end_test;
+ }
+
+ for (gpr = 1; gpr < NUM_GPR; gpr++) {
+ if (result[NUM_GPR * i + gpr] != 1) {
+ pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
+ engine->name,
+ i, gpr, result[NUM_GPR * i + gpr]);
+ err = -EINVAL;
+ goto end_test;
+ }
+ }
+ }
+
+end_test:
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (!client[i])
+ break;
+
+ i915_request_put(client[i]);
+ }
+
+ /* Flush the semaphores on error */
+ smp_store_mb(result[0], -1);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
+ return err;
+}
+
+static int live_preempt_timeout(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we force preemption to occur by cancelling the previous
+ * context if it refuses to yield the GPU.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ return -ENOMEM;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_timeout;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = igt_request_alloc(ctx_hi, engine);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ /* Flush the previous CS ack before changing timeouts */
+ while (READ_ONCE(engine->execlists.pending[0]))
+ cpu_relax();
+
+ saved_timeout = engine->props.preempt_timeout_ms;
+ engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffy */
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ intel_engine_flush_submission(engine);
+ engine->props.preempt_timeout_ms = saved_timeout;
+
+ if (i915_request_wait(rq, 0, HZ / 10) < 0) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ctx_lo;
+ }
+
+ igt_spinner_end(&spin_lo);
+ i915_request_put(rq);
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
+ return err;
+}
+
+static int random_range(struct rnd_state *rnd, int min, int max)
+{
+ return i915_prandom_u32_max_state(max - min, rnd) + min;
+}
+
+static int random_priority(struct rnd_state *rnd)
+{
+ return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
+}
+
+struct preempt_smoke {
+ struct intel_gt *gt;
+ struct i915_gem_context **contexts;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *batch;
+ unsigned int ncontext;
+ struct rnd_state prng;
+ unsigned long count;
+};
+
+static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
+{
+ return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
+ &smoke->prng)];
+}
+
+static int smoke_submit(struct preempt_smoke *smoke,
+ struct i915_gem_context *ctx, int prio,
+ struct drm_i915_gem_object *batch)
+{
+ struct i915_request *rq;
+ struct i915_vma *vma = NULL;
+ int err = 0;
+
+ if (batch) {
+ struct i915_address_space *vm;
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(batch, vm, NULL);
+ i915_vm_put(vm);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+ }
+
+ ctx->sched.priority = prio;
+
+ rq = igt_request_alloc(ctx, smoke->engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto unpin;
+ }
+
+ if (vma) {
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ }
+
+ i915_request_add(rq);
+
+unpin:
+ if (vma)
+ i915_vma_unpin(vma);
+
+ return err;
+}
+
+static int smoke_crescendo_thread(void *arg)
+{
+ struct preempt_smoke *smoke = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+ int err;
+
+ err = smoke_submit(smoke,
+ ctx, count % I915_PRIORITY_MAX,
+ smoke->batch);
+ if (err)
+ return err;
+
+ count++;
+ } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
+
+ smoke->count = count;
+ return 0;
+}
+
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
+{
+ struct task_struct *tsk[I915_NUM_ENGINES] = {};
+ struct preempt_smoke arg[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long count;
+ int err = 0;
+
+ for_each_engine(engine, smoke->gt, id) {
+ arg[id] = *smoke;
+ arg[id].engine = engine;
+ if (!(flags & BATCH))
+ arg[id].batch = NULL;
+ arg[id].count = 0;
+
+ tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
+ "igt/smoke:%d", id);
+ if (IS_ERR(tsk[id])) {
+ err = PTR_ERR(tsk[id]);
+ break;
+ }
+ get_task_struct(tsk[id]);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ count = 0;
+ for_each_engine(engine, smoke->gt, id) {
+ int status;
+
+ if (IS_ERR_OR_NULL(tsk[id]))
+ continue;
+
+ status = kthread_stop(tsk[id]);
+ if (status && !err)
+ err = status;
+
+ count += arg[id].count;
+
+ put_task_struct(tsk[id]);
+ }
+
+ pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+ count, flags, smoke->gt->info.num_engines, smoke->ncontext);
+ return err;
+}
+
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
+{
+ enum intel_engine_id id;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ for_each_engine(smoke->engine, smoke->gt, id) {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+ int err;
+
+ err = smoke_submit(smoke,
+ ctx, random_priority(&smoke->prng),
+ flags & BATCH ? smoke->batch : NULL);
+ if (err)
+ return err;
+
+ count++;
+ }
+ } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
+
+ pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
+ count, flags, smoke->gt->info.num_engines, smoke->ncontext);
+ return 0;
+}
+
+static int live_preempt_smoke(void *arg)
+{
+ struct preempt_smoke smoke = {
+ .gt = arg,
+ .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
+ .ncontext = 256,
+ };
+ const unsigned int phase[] = { 0, BATCH };
+ struct igt_live_test t;
+ int err = -ENOMEM;
+ u32 *cs;
+ int n;
+
+ smoke.contexts = kmalloc_array(smoke.ncontext,
+ sizeof(*smoke.contexts),
+ GFP_KERNEL);
+ if (!smoke.contexts)
+ return -ENOMEM;
+
+ smoke.batch =
+ i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
+ if (IS_ERR(smoke.batch)) {
+ err = PTR_ERR(smoke.batch);
+ goto err_free;
+ }
+
+ cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+ for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
+ cs[n] = MI_ARB_CHECK;
+ cs[n] = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(smoke.batch);
+ i915_gem_object_unpin_map(smoke.batch);
+
+ if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
+ err = -EIO;
+ goto err_batch;
+ }
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ smoke.contexts[n] = kernel_context(smoke.gt->i915);
+ if (!smoke.contexts[n])
+ goto err_ctx;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(phase); n++) {
+ err = smoke_crescendo(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+
+ err = smoke_random(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+ }
+
+err_ctx:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ if (!smoke.contexts[n])
+ break;
+ kernel_context_close(smoke.contexts[n]);
+ }
+
+err_batch:
+ i915_gem_object_put(smoke.batch);
+err_free:
+ kfree(smoke.contexts);
+
+ return err;
+}
+
+static int nop_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling,
+ unsigned int nctx,
+ unsigned int flags)
+#define CHAIN BIT(0)
+{
+ IGT_TIMEOUT(end_time);
+ struct i915_request *request[16] = {};
+ struct intel_context *ve[16];
+ unsigned long n, prime, nc;
+ struct igt_live_test t;
+ ktime_t times[2] = {};
+ int err;
+
+ GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
+
+ for (n = 0; n < nctx; n++) {
+ ve[n] = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve[n])) {
+ err = PTR_ERR(ve[n]);
+ nctx = n;
+ goto out;
+ }
+
+ err = intel_context_pin(ve[n]);
+ if (err) {
+ intel_context_put(ve[n]);
+ nctx = n;
+ goto out;
+ }
+ }
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
+ if (err)
+ goto out;
+
+ for_each_prime_number_from(prime, 1, 8192) {
+ times[1] = ktime_get_raw();
+
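+ /*
+ * With CHAIN, submit the whole run of requests on one virtual
+ * context before moving on to the next; otherwise interleave the
+ * contexts one request at a time.
+ */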
+ if (flags & CHAIN) {
+ for (nc = 0; nc < nctx; nc++) {
+ for (n = 0; n < prime; n++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+ }
+ } else {
+ for (n = 0; n < prime; n++) {
+ for (nc = 0; nc < nctx; nc++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+ }
+ }
+
+ for (nc = 0; nc < nctx; nc++) {
+ if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
+ pr_err("%s(%s): wait for %llx:%lld timed out\n",
+ __func__, ve[0]->engine->name,
+ request[nc]->fence.context,
+ request[nc]->fence.seqno);
+
+ GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+ __func__, ve[0]->engine->name,
+ request[nc]->fence.context,
+ request[nc]->fence.seqno);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ break;
+ }
+ }
+
+ times[1] = ktime_sub(ktime_get_raw(), times[1]);
+ if (prime == 1)
+ times[0] = times[1];
+
+ for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
+ request[nc] = NULL;
+ }
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ }
+
+ err = igt_live_test_end(&t);
+ if (err)
+ goto out;
+
+ pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
+ nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
+ prime, div64_u64(ktime_to_ns(times[1]), prime));
+
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
+ intel_context_unpin(ve[nc]);
+ intel_context_put(ve[nc]);
+ }
+ return err;
+}
+
+static unsigned int
+__select_siblings(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings,
+ bool (*filter)(const struct intel_engine_cs *))
+{
+ unsigned int n = 0;
+ unsigned int inst;
+
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ if (filter && !filter(gt->engine_class[class][inst]))
+ continue;
+
+ siblings[n++] = gt->engine_class[class][inst];
+ }
+
+ return n;
+}
+
+static unsigned int
+select_siblings(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings)
+{
+ return __select_siblings(gt, class, siblings, NULL);
+}
+
+static int live_virtual_engine(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ err = nop_virtual_engine(gt, &engine, 1, 1, 0);
+ if (err) {
+ pr_err("Failed to wrap engine %s: err=%d\n",
+ engine->name, err);
+ return err;
+ }
+ }
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, n;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ for (n = 1; n <= nsibling + 1; n++) {
+ err = nop_virtual_engine(gt, siblings, nsibling,
+ n, 0);
+ if (err)
+ return err;
+ }
+
+ err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mask_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
+ struct intel_context *ve;
+ struct igt_live_test t;
+ unsigned int n;
+ int err;
+
+ /*
+ * Check that by setting the execution mask on a request, we can
+ * restrict it to our desired engine within the virtual engine.
+ */
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_close;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
+ for (n = 0; n < nsibling; n++) {
+ request[n] = i915_request_create(ve);
+ if (IS_ERR(request[n])) {
+ err = PTR_ERR(request[n]);
+ nsibling = n;
+ goto out;
+ }
+
+ /* Reverse order as it's more likely to be unnatural */
+ request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
+
+ i915_request_get(request[n]);
+ i915_request_add(request[n]);
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
+ pr_err("%s(%s): wait for %llx:%lld timed out\n",
+ __func__, ve->engine->name,
+ request[n]->fence.context,
+ request[n]->fence.seqno);
+
+ GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+ __func__, ve->engine->name,
+ request[n]->fence.context,
+ request[n]->fence.seqno);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out;
+ }
+
+ if (request[n]->engine != siblings[nsibling - n - 1]) {
+ pr_err("Executed on wrong sibling '%s', expected '%s'\n",
+ request[n]->engine->name,
+ siblings[nsibling - n - 1]->name);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ err = igt_live_test_end(&t);
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ for (n = 0; n < nsibling; n++)
+ i915_request_put(request[n]);
+
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_close:
+ return err;
+}
+
+static int live_virtual_mask(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ unsigned int nsibling;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = mask_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int slicein_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ const long timeout = slice_timeout(siblings[0]);
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct igt_spinner spin;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * Virtual requests must take part in timeslicing on the target engines.
+ */
+
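+ /*
+ * Occupy each sibling with a spinner on an ordinary context first; the
+ * virtual request submitted afterwards must then be timesliced in on
+ * one of them within slice_timeout().
+ */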
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for (n = 0; n < nsibling; n++) {
+ ce = intel_context_create(siblings[n]);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_add(rq);
+ }
+
+ ce = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0) {
+ GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
+ __func__, rq->engine->name);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+
+out:
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int sliceout_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ const long timeout = slice_timeout(siblings[0]);
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct igt_spinner spin;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * Virtual requests must allow others a fair timeslice.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ /* XXX We do not handle oversubscription and fairness with normal rq */
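+ /*
+ * Submit one spinning virtual request per sibling (expected to spread
+ * across all of them); an ordinary request on each physical engine
+ * must then still complete within slice_timeout(), i.e. the virtual
+ * spinners are sliced out.
+ */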
+ for (n = 0; n < nsibling; n++) {
+ ce = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_add(rq);
+ }
+
+ for (n = 0; !err && n < nsibling; n++) {
+ ce = intel_context_create(siblings[n]);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0) {
+ GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
+ __func__, siblings[n]->name);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+ }
+
+out:
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_slice(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ unsigned int nsibling;
+
+ nsibling = __select_siblings(gt, class, siblings,
+ intel_engine_has_timeslices);
+ if (nsibling < 2)
+ continue;
+
+ err = slicein_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+
+ err = sliceout_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int preserved_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *last = NULL;
+ struct intel_context *ve;
+ struct i915_vma *scratch;
+ struct igt_live_test t;
+ unsigned int n;
+ int err = 0;
+ u32 *cs;
+
+ scratch = __vm_create_scratch_for_read(&siblings[0]->gt->ggtt->vm,
+ PAGE_SIZE);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ err = i915_vma_sync(scratch);
+ if (err)
+ goto out_scratch;
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_scratch;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
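+ /*
+ * Chain NUM_GPR_DW requests across the siblings: request n stores
+ * GPR[n] into scratch[n] and then loads n + 1 into the next GPR, so
+ * scratch only reads back 0..NUM_GPR_DW-1 if the GPR values survive
+ * the switches between engines.
+ */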
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ struct intel_engine_cs *engine = siblings[n % nsibling];
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_end;
+ }
+
+ i915_request_put(last);
+ last = i915_request_get(rq);
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
+ *cs++ = n + 1;
+
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Restrict this request to run on a particular engine */
+ rq->execution_mask = engine->mask;
+ i915_request_add(rq);
+ }
+
+ if (i915_request_wait(last, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto out_end;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ if (cs[n] != n) {
+ pr_err("Incorrect value[%d] found for GPR[%d]\n",
+ cs[n], n);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+out_end:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ i915_request_put(last);
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static int live_virtual_preserved(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+
+ /*
+ * Check that the context image retains non-privileged (user) registers
+ * from one engine to the next. For this we check that the CS_GPR
+ * are preserved.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ /* As we use CS_GPR we cannot run before they existed on all engines. */
+ if (INTEL_GEN(gt->i915) < 9)
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = preserved_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int bond_virtual_engine(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling,
+ unsigned int flags)
+#define BOND_SCHEDULE BIT(0)
+{
+ struct intel_engine_cs *master;
+ struct i915_request *rq[16];
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ unsigned long n;
+ int err;
+
+ /*
+ * A set of bonded requests is intended to be run concurrently
+ * across a number of engines. We use one request per-engine
+ * and a magic fence to schedule each of the bonded requests
+ * at the same time. A consequence of our current scheduler is that
+ * we only move requests to the HW ready queue when the request
+ * becomes ready, that is when all of its prerequisite fences have
+ * been signaled. As one of those fences is the master submit fence,
+ * there is a delay on all secondary fences as the HW may be
+ * currently busy. Equally, as all the requests are independent,
+ * they may have other fences that delay individual request
+ * submission to HW. Ergo, we do not guarantee that all requests are
+ * immediately submitted to HW at the same time, just that if the
+ * rules are abided by, they are ready at the same time as the
+ * first is submitted. Userspace can embed semaphores in its batch
+ * to ensure parallel execution of its phases as it requires.
+ * Though naturally it gets requested that perhaps the scheduler should
+ * take care of parallel execution, even across preemption events on
+ * different HW. (The proper answer is of course "lalalala".)
+ *
+ * With the submit-fence, we have identified three possible phases
+ * of synchronisation depending on the master fence: queued (not
+ * ready), executing, and signaled. The first two are quite simple
+ * and checked below. However, the signaled master fence handling is
+ * contentious. Currently we do not distinguish between a signaled
+ * fence and an expired fence, as once signaled it does not convey
+ * any information about the previous execution. It may even be freed,
+ * and hence, by the time we check, it may not exist at all. Ergo we currently
+ * do not apply the bonding constraint for an already signaled fence,
+ * as our expectation is that it should not constrain the secondaries
+ * and is outside of the scope of the bonded request API (i.e. all
+ * userspace requests are meant to be running in parallel). As
+ * it imposes no constraint, and is effectively a no-op, we do not
+ * check below as normal execution flows are checked extensively above.
+ *
+ * XXX Is the degenerate handling of signaled submit fences the
+ * expected behaviour for userspace?
+ */
+
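+ /*
+ * The BOND_SCHEDULE phase keeps the master unready behind an on-stack
+ * fence so the bonds are attached to a queued master; without it we
+ * wait for the spinner, attaching them while the master is executing.
+ */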
+ GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ err = 0;
+ rq[0] = ERR_PTR(-ENOMEM);
+ for_each_engine(master, gt, id) {
+ struct i915_sw_fence fence = {};
+ struct intel_context *ce;
+
+ if (master->class == class)
+ continue;
+
+ ce = intel_context_create(master);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
+
+ rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ intel_context_put(ce);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ goto out;
+ }
+ i915_request_get(rq[0]);
+
+ if (flags & BOND_SCHEDULE) {
+ onstack_fence_init(&fence);
+ err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
+ &fence,
+ GFP_KERNEL);
+ }
+
+ i915_request_add(rq[0]);
+ if (err < 0)
+ goto out;
+
+ if (!(flags & BOND_SCHEDULE) &&
+ !igt_wait_for_spinner(&spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ struct intel_context *ve;
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ err = intel_virtual_engine_attach_bond(ve->engine,
+ master,
+ siblings[n]);
+ if (err) {
+ intel_context_put(ve);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ err = intel_context_pin(ve);
+ intel_context_put(ve);
+ if (err) {
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ rq[n + 1] = i915_request_create(ve);
+ intel_context_unpin(ve);
+ if (IS_ERR(rq[n + 1])) {
+ err = PTR_ERR(rq[n + 1]);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+ i915_request_get(rq[n + 1]);
+
+ err = i915_request_await_execution(rq[n + 1],
+ &rq[0]->fence,
+ ve->engine->bond_execute);
+ i915_request_add(rq[n + 1]);
+ if (err < 0) {
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+ }
+ onstack_fence_fini(&fence);
+ intel_engine_flush_submission(master);
+ igt_spinner_end(&spin);
+
+ if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
+ pr_err("Master request did not execute (on %s)!\n",
+ rq[0]->engine->name);
+ err = -EIO;
+ goto out;
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ if (i915_request_wait(rq[n + 1], 0,
+ MAX_SCHEDULE_TIMEOUT) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq[n + 1]->engine != siblings[n]) {
+ pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
+ siblings[n]->name,
+ rq[n + 1]->engine->name,
+ rq[0]->engine->name);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ for (n = 0; !IS_ERR(rq[n]); n++)
+ i915_request_put(rq[n]);
+ rq[0] = ERR_PTR(-ENOMEM);
+ }
+
+out:
+ for (n = 0; !IS_ERR(rq[n]); n++)
+ i915_request_put(rq[n]);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_bond(void *arg)
+{
+ static const struct phase {
+ const char *name;
+ unsigned int flags;
+ } phases[] = {
+ { "", 0 },
+ { "schedule", BOND_SCHEDULE },
+ { },
+ };
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ const struct phase *p;
+ int nsibling;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ for (p = phases; p->name; p++) {
+ err = bond_virtual_engine(gt,
+ class, siblings, nsibling,
+ p->flags);
+ if (err) {
+ pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
+ __func__, p->name, class, nsibling, err);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int reset_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct intel_engine_cs *engine;
+ struct intel_context *ve;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendants are not executed while the capture is in progress.
+ */
+
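+ /*
+ * The spinner is submitted to the virtual engine; whichever physical
+ * sibling picks it up is then reset while the request is held.
+ */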
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_spin;
+ }
+
+ for (n = 0; n < nsibling; n++)
+ st_engine_heartbeat_disable(siblings[n]);
+
+ rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out_heartbeat;
+ }
+
+ engine = rq->engine;
+ GEM_BUG_ON(engine == ve->engine);
+
+ /* Take ownership of the reset and tasklet */
+ local_bh_disable();
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags)) {
+ local_bh_enable();
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out_heartbeat;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Fake a preemption event; failed of course */
+ spin_lock_irq(&engine->active.lock);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irq(&engine->active.lock);
+ GEM_BUG_ON(rq->engine != engine);
+
+ /* Reset the engine while keeping our active request on hold */
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ __intel_engine_reset_bh(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ /* Release our grasp on the engine, letting CS flow again */
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
+ local_bh_enable();
+
+ /* Check that we do not resubmit the held request */
+ i915_request_get(rq);
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out_rq;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_heartbeat:
+ for (n = 0; n < nsibling; n++)
+ st_engine_heartbeat_enable(siblings[n]);
+
+ intel_context_put(ve);
+out_spin:
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+
+ /*
+ * Check that we handle a reset event within a virtual engine.
+ * Only the physical engine is reset, but we have to check the flow
+ * of the virtual requests around the reset, and make sure it is not
+ * forgotten.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = reset_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int intel_execlists_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_sanitycheck),
+ SUBTEST(live_unlite_switch),
+ SUBTEST(live_unlite_preempt),
+ SUBTEST(live_unlite_ring),
+ SUBTEST(live_pin_rewind),
+ SUBTEST(live_hold_reset),
+ SUBTEST(live_error_interrupt),
+ SUBTEST(live_timeslice_preempt),
+ SUBTEST(live_timeslice_rewind),
+ SUBTEST(live_timeslice_queue),
+ SUBTEST(live_timeslice_nopreempt),
+ SUBTEST(live_busywait_preempt),
+ SUBTEST(live_preempt),
+ SUBTEST(live_late_preempt),
+ SUBTEST(live_nopreempt),
+ SUBTEST(live_preempt_cancel),
+ SUBTEST(live_suppress_self_preempt),
+ SUBTEST(live_chain_preempt),
+ SUBTEST(live_preempt_ring),
+ SUBTEST(live_preempt_gang),
+ SUBTEST(live_preempt_timeout),
+ SUBTEST(live_preempt_user),
+ SUBTEST(live_preempt_smoke),
+ SUBTEST(live_virtual_engine),
+ SUBTEST(live_virtual_mask),
+ SUBTEST(live_virtual_preserved),
+ SUBTEST(live_virtual_slice),
+ SUBTEST(live_virtual_bond),
+ SUBTEST(live_virtual_reset),
+ };
+
+ if (!HAS_EXECLISTS(i915))
+ return 0;
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 6180a47c1b51..5d911f724ebe 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -71,7 +71,7 @@ static int live_gt_clocks(void *arg)
enum intel_engine_id id;
int err = 0;
- if (!RUNTIME_INFO(gt->i915)->cs_timestamp_frequency_hz) { /* unknown */
+ if (!gt->clock_frequency) { /* unknown */
pr_info("CS_TIMESTAMP frequency unknown\n");
return 0;
}
@@ -112,12 +112,12 @@ static int live_gt_clocks(void *arg)
measure_clocks(engine, &cycles, &dt);
- time = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
- expected = i915_cs_timestamp_ns_to_ticks(engine->i915, dt);
+ time = intel_gt_clock_interval_to_ns(engine->gt, cycles);
+ expected = intel_gt_ns_to_clock_interval(engine->gt, dt);
pr_info("%s: TIMESTAMP %d cycles [%lldns] in %lldns [%d cycles], using CS clock frequency of %uKHz\n",
engine->name, cycles, time, dt, expected,
- RUNTIME_INFO(engine->i915)->cs_timestamp_frequency_hz / 1000);
+ engine->gt->clock_frequency / 1000);
if (9 * time < 8 * dt || 8 * time > 9 * dt) {
pr_err("%s: CS ticks did not match walltime!\n",
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index fb5ebf930ab2..463bb6a700c8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -506,7 +506,8 @@ static int igt_reset_nop_engine(void *arg)
}
err = intel_engine_reset(engine, NULL);
if (err) {
- pr_err("i915_reset_engine failed\n");
+ pr_err("intel_engine_reset(%s) failed, err:%d\n",
+ engine->name, err);
break;
}
@@ -539,6 +540,149 @@ static int igt_reset_nop_engine(void *arg)
return 0;
}
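+/*
+ * Saturate the engine's reset_timeout injection so that the next
+ * intel_engine_reset() attempt is expected to fail with -ETIMEDOUT
+ * (timeouts are only generated on gen8+).
+ */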
+static void force_reset_timeout(struct intel_engine_cs *engine)
+{
+ engine->reset_timeout.probability = 999;
+ atomic_set(&engine->reset_timeout.times, -1);
+}
+
+static void cancel_reset_timeout(struct intel_engine_cs *engine)
+{
+ memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
+}
+
+static int igt_reset_fail_engine(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* Check that we can recover from engine-reset failures */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ unsigned int count;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ st_engine_heartbeat_disable(engine);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+
+ force_reset_timeout(engine);
+ err = intel_engine_reset(engine, NULL);
+ cancel_reset_timeout(engine);
+ if (err == 0) /* timeouts only generated on gen8+ */
+ goto skip;
+
+ count = 0;
+ do {
+ struct i915_request *last = NULL;
+ int i;
+
+ if (!wait_for_idle(engine)) {
+ pr_err("%s failed to idle before reset\n",
+ engine->name);
+ err = -EIO;
+ break;
+ }
+
+ for (i = 0; i < count % 15; i++) {
+ struct i915_request *rq;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+ intel_engine_dump(engine, &p,
+ "%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+
+ GEM_TRACE("%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(gt);
+ if (last)
+ i915_request_put(last);
+
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (last)
+ i915_request_put(last);
+ last = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
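+ /*
+ * Alternate between a real engine reset (odd iterations), which must
+ * succeed, and an injected reset failure (even iterations), which must
+ * return -ETIMEDOUT, then check the queued requests still complete.
+ */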
+ if (count & 1) {
+ err = intel_engine_reset(engine, NULL);
+ if (err) {
+ GEM_TRACE_ERR("intel_engine_reset(%s) failed, err:%d\n",
+ engine->name, err);
+ GEM_TRACE_DUMP();
+ i915_request_put(last);
+ break;
+ }
+ } else {
+ force_reset_timeout(engine);
+ err = intel_engine_reset(engine, NULL);
+ cancel_reset_timeout(engine);
+ if (err != -ETIMEDOUT) {
+ pr_err("intel_engine_reset(%s) did not fail, err:%d\n",
+ engine->name, err);
+ i915_request_put(last);
+ break;
+ }
+ }
+
+ err = 0;
+ if (last) {
+ if (i915_request_wait(last, 0, HZ / 2) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ intel_engine_dump(engine, &p,
+ "%s(%s): failed to complete request\n",
+ __func__,
+ engine->name);
+
+ GEM_TRACE("%s(%s): failed to complete request\n",
+ __func__,
+ engine->name);
+ GEM_TRACE_DUMP();
+
+ err = -EIO;
+ }
+ i915_request_put(last);
+ }
+ count++;
+ } while (err == 0 && time_before(jiffies, end_time));
+out:
+ pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+skip:
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ st_engine_heartbeat_enable(engine);
+ intel_context_put(ce);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int __igt_reset_engine(struct intel_gt *gt, bool active)
{
struct i915_gpu_error *global = &gt->i915->gpu_error;
@@ -560,6 +704,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count;
+ unsigned long count;
IGT_TIMEOUT(end_time);
if (active && !intel_engine_can_store_dword(engine))
@@ -577,6 +722,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
st_engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ count = 0;
do {
if (active) {
struct i915_request *rq;
@@ -608,7 +754,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
err = intel_engine_reset(engine, NULL);
if (err) {
- pr_err("i915_reset_engine failed\n");
+ pr_err("intel_engine_reset(%s) failed, err:%d\n",
+ engine->name, err);
break;
}
@@ -625,9 +772,13 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
err = -EINVAL;
break;
}
+
+ count++;
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
st_engine_heartbeat_enable(engine);
+ pr_info("%s: Completed %lu %s resets\n",
+ engine->name, count, active ? "active" : "idle");
if (err)
break;
@@ -1478,7 +1629,8 @@ static int igt_reset_queue(void *arg)
prev = rq;
count++;
} while (time_before(jiffies, end_time));
- pr_info("%s: Completed %d resets\n", engine->name, count);
+ pr_info("%s: Completed %d queued resets\n",
+ engine->name, count);
*h.batch = MI_BATCH_BUFFER_END;
intel_gt_chipset_flush(engine->gt);
@@ -1575,13 +1727,21 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
engine->name, mode, p->name);
- tasklet_disable(t);
+ if (t->func)
+ tasklet_disable(t);
+ if (strcmp(p->name, "softirq"))
+ local_bh_disable();
p->critical_section_begin();
- err = intel_engine_reset(engine, NULL);
+ err = __intel_engine_reset_bh(engine, NULL);
p->critical_section_end();
- tasklet_enable(t);
+ if (strcmp(p->name, "softirq"))
+ local_bh_enable();
+ if (t->func) {
+ tasklet_enable(t);
+ tasklet_hi_schedule(t);
+ }
if (err)
pr_err("i915_reset_engine(%s:%s) failed under %s\n",
@@ -1687,6 +1847,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_reset_nop_engine),
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
+ SUBTEST(igt_reset_fail_engine),
SUBTEST(igt_reset_engines),
SUBTEST(igt_reset_engines_atomic),
SUBTEST(igt_reset_queue),
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 95d41c01d0e0..920979a89413 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1,22 +1,22 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2018 Intel Corporation
*/
#include <linux/prime_numbers.h>
-#include "gem/i915_gem_pm.h"
-#include "gt/intel_engine_heartbeat.h"
-#include "gt/intel_reset.h"
-#include "gt/selftest_engine_heartbeat.h"
-
#include "i915_selftest.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "selftest_engine_heartbeat.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/lib_sw_fence.h"
+#include "shmem_utils.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
@@ -27,29 +27,7 @@
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int err;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err) {
- i915_gem_object_put(obj);
- return ERR_PTR(err);
- }
-
- return vma;
+ return __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
}
static bool is_active(struct i915_request *rq)
@@ -70,6 +48,9 @@ static int wait_for_submit(struct intel_engine_cs *engine,
struct i915_request *rq,
unsigned long timeout)
{
+ /* Ignore our own attempts to suppress excess tasklets */
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
timeout += jiffies;
do {
bool done = time_after(jiffies, timeout);
@@ -89,4623 +70,6 @@ static int wait_for_submit(struct intel_engine_cs *engine,
} while (1);
}
-static int wait_for_reset(struct intel_engine_cs *engine,
- struct i915_request *rq,
- unsigned long timeout)
-{
- timeout += jiffies;
-
- do {
- cond_resched();
- intel_engine_flush_submission(engine);
-
- if (READ_ONCE(engine->execlists.pending[0]))
- continue;
-
- if (i915_request_completed(rq))
- break;
-
- if (READ_ONCE(rq->fence.error))
- break;
- } while (time_before(jiffies, timeout));
-
- flush_scheduled_work();
-
- if (rq->fence.error != -EIO) {
- pr_err("%s: hanging request %llx:%lld not reset\n",
- engine->name,
- rq->fence.context,
- rq->fence.seqno);
- return -EINVAL;
- }
-
- /* Give the request a jiffie to complete after flushing the worker */
- if (i915_request_wait(rq, 0,
- max(0l, (long)(timeout - jiffies)) + 1) < 0) {
- pr_err("%s: hanging request %llx:%lld did not complete\n",
- engine->name,
- rq->fence.context,
- rq->fence.seqno);
- return -ETIME;
- }
-
- return 0;
-}
-
-static int live_sanitycheck(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = 0;
-
- if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
- return 0;
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_ctx;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin, rq)) {
- GEM_TRACE("spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto out_ctx;
- }
-
- igt_spinner_end(&spin);
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- goto out_ctx;
- }
-
-out_ctx:
- intel_context_put(ce);
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_unlite_restore(struct intel_gt *gt, int prio)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = -ENOMEM;
-
- /*
- * Check that we can correctly context switch between 2 instances
- * on the same engine from the same parent context.
- */
-
- if (igt_spinner_init(&spin, gt))
- return err;
-
- err = 0;
- for_each_engine(engine, gt, id) {
- struct intel_context *ce[2] = {};
- struct i915_request *rq[2];
- struct igt_live_test t;
- int n;
-
- if (prio && !intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
- st_engine_heartbeat_disable(engine);
-
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- struct intel_context *tmp;
-
- tmp = intel_context_create(engine);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- goto err_ce;
- }
-
- err = intel_context_pin(tmp);
- if (err) {
- intel_context_put(tmp);
- goto err_ce;
- }
-
- /*
- * Setup the pair of contexts such that if we
- * lite-restore using the RING_TAIL from ce[1] it
- * will execute garbage from ce[0]->ring.
- */
- memset(tmp->ring->vaddr,
- POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
- tmp->ring->vma->size);
-
- ce[n] = tmp;
- }
- GEM_BUG_ON(!ce[1]->ring->size);
- intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
- __execlists_update_reg_state(ce[1], engine, ce[1]->ring->head);
-
- rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
- if (IS_ERR(rq[0])) {
- err = PTR_ERR(rq[0]);
- goto err_ce;
- }
-
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
-
- if (!igt_wait_for_spinner(&spin, rq[0])) {
- i915_request_put(rq[0]);
- goto err_ce;
- }
-
- rq[1] = i915_request_create(ce[1]);
- if (IS_ERR(rq[1])) {
- err = PTR_ERR(rq[1]);
- i915_request_put(rq[0]);
- goto err_ce;
- }
-
- if (!prio) {
- /*
- * Ensure we do the switch to ce[1] on completion.
- *
- * rq[0] is already submitted, so this should reduce
- * to a no-op (a wait on a request on the same engine
- * uses the submit fence, not the completion fence),
- * but it will install a dependency on rq[1] for rq[0]
- * that will prevent the pair being reordered by
- * timeslicing.
- */
- i915_request_await_dma_fence(rq[1], &rq[0]->fence);
- }
-
- i915_request_get(rq[1]);
- i915_request_add(rq[1]);
- GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
- i915_request_put(rq[0]);
-
- if (prio) {
- struct i915_sched_attr attr = {
- .priority = prio,
- };
-
- /* Alternatively preempt the spinner with ce[1] */
- engine->schedule(rq[1], &attr);
- }
-
- /* And switch back to ce[0] for good measure */
- rq[0] = i915_request_create(ce[0]);
- if (IS_ERR(rq[0])) {
- err = PTR_ERR(rq[0]);
- i915_request_put(rq[1]);
- goto err_ce;
- }
-
- i915_request_await_dma_fence(rq[0], &rq[1]->fence);
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
- i915_request_put(rq[1]);
- i915_request_put(rq[0]);
-
-err_ce:
- intel_engine_flush_submission(engine);
- igt_spinner_end(&spin);
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- if (IS_ERR_OR_NULL(ce[n]))
- break;
-
- intel_context_unpin(ce[n]);
- intel_context_put(ce[n]);
- }
-
- st_engine_heartbeat_enable(engine);
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_unlite_switch(void *arg)
-{
- return live_unlite_restore(arg, 0);
-}
-
-static int live_unlite_preempt(void *arg)
-{
- return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
-}
-
-static int live_unlite_ring(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct igt_spinner spin;
- enum intel_engine_id id;
- int err = 0;
-
- /*
- * Setup a preemption event that will cause almost the entire ring
- * to be unwound, potentially fooling our intel_ring_direction()
- * into emitting a forward lite-restore instead of the rollback.
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce[2] = {};
- struct i915_request *rq;
- struct igt_live_test t;
- int n;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
- st_engine_heartbeat_disable(engine);
-
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- struct intel_context *tmp;
-
- tmp = intel_context_create(engine);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- goto err_ce;
- }
-
- err = intel_context_pin(tmp);
- if (err) {
- intel_context_put(tmp);
- goto err_ce;
- }
-
- memset32(tmp->ring->vaddr,
- 0xdeadbeef, /* trigger a hang if executed */
- tmp->ring->vma->size / sizeof(u32));
-
- ce[n] = tmp;
- }
-
- /* Create max prio spinner, followed by N low prio nops */
- rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- i915_request_get(rq);
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- intel_gt_set_wedged(gt);
- i915_request_put(rq);
- err = -ETIME;
- goto err_ce;
- }
-
- /* Fill the ring, until we will cause a wrap */
- n = 0;
- while (intel_ring_direction(ce[0]->ring,
- rq->wa_tail,
- ce[0]->ring->tail) <= 0) {
- struct i915_request *tmp;
-
- tmp = intel_context_create_request(ce[0]);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- i915_request_put(rq);
- goto err_ce;
- }
-
- i915_request_add(tmp);
- intel_engine_flush_submission(engine);
- n++;
- }
- intel_engine_flush_submission(engine);
- pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
- engine->name, n,
- ce[0]->ring->size,
- ce[0]->ring->tail,
- ce[0]->ring->emit,
- rq->tail);
- GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
- rq->tail,
- ce[0]->ring->tail) <= 0);
- i915_request_put(rq);
-
- /* Create a second ring to preempt the first ring after rq[0] */
- rq = intel_context_create_request(ce[1]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_get(rq);
- i915_request_add(rq);
-
- err = wait_for_submit(engine, rq, HZ / 2);
- i915_request_put(rq);
- if (err) {
- pr_err("%s: preemption request was not submitted\n",
- engine->name);
- err = -ETIME;
- }
-
- pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
- engine->name,
- ce[0]->ring->tail, ce[0]->ring->emit,
- ce[1]->ring->tail, ce[1]->ring->emit);
-
-err_ce:
- intel_engine_flush_submission(engine);
- igt_spinner_end(&spin);
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- if (IS_ERR_OR_NULL(ce[n]))
- break;
-
- intel_context_unpin(ce[n]);
- intel_context_put(ce[n]);
- }
- st_engine_heartbeat_enable(engine);
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_pin_rewind(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = 0;
-
- /*
- * We have to be careful not to trust intel_ring too much, for example
- * ring->head is updated upon retire which is out of sync with pinning
- * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
- * or else we risk writing an older, stale value.
- *
- * To simulate this, let's apply a bit of deliberate sabotague.
- */
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
- struct intel_ring *ring;
- struct igt_live_test t;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- err = intel_context_pin(ce);
- if (err) {
- intel_context_put(ce);
- break;
- }
-
- /* Keep the context awake while we play games */
- err = i915_active_acquire(&ce->active);
- if (err) {
- intel_context_unpin(ce);
- intel_context_put(ce);
- break;
- }
- ring = ce->ring;
-
- /* Poison the ring, and offset the next request from HEAD */
- memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
- ring->emit = ring->size / 2;
- ring->tail = ring->emit;
- GEM_BUG_ON(ring->head);
-
- intel_context_unpin(ce);
-
- /* Submit a simple nop request */
- GEM_BUG_ON(intel_context_is_pinned(ce));
- rq = intel_context_create_request(ce);
- i915_active_release(&ce->active); /* e.g. async retire */
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- break;
- }
- GEM_BUG_ON(!rq->head);
- i915_request_add(rq);
-
- /* Expect not to hang! */
- if (igt_live_test_end(&t)) {
- err = -EIO;
- break;
- }
- }
-
- return err;
-}
-
-static int live_hold_reset(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = 0;
-
- /*
- * In order to support offline error capture for fast preempt reset,
- * we need to decouple the guilty request and ensure that it and its
- * descendents are not executed while the capture is in progress.
- */
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- st_engine_heartbeat_disable(engine);
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- intel_gt_set_wedged(gt);
- err = -ETIME;
- goto out;
- }
-
- /* We have our request executing, now remove it and reset */
-
- if (test_and_set_bit(I915_RESET_ENGINE + id,
- &gt->reset.flags)) {
- intel_gt_set_wedged(gt);
- err = -EBUSY;
- goto out;
- }
- tasklet_disable(&engine->execlists.tasklet);
-
- engine->execlists.tasklet.func(engine->execlists.tasklet.data);
- GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
- i915_request_get(rq);
- execlists_hold(engine, rq);
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- intel_engine_reset(engine, NULL);
- GEM_BUG_ON(rq->fence.error != -EIO);
-
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(I915_RESET_ENGINE + id,
- &gt->reset.flags);
-
- /* Check that we do not resubmit the held request */
- if (!i915_request_wait(rq, 0, HZ / 5)) {
- pr_err("%s: on hold request completed!\n",
- engine->name);
- i915_request_put(rq);
- err = -EIO;
- goto out;
- }
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- /* But is resubmitted on release */
- execlists_unhold(engine, rq);
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- pr_err("%s: held request did not complete!\n",
- engine->name);
- intel_gt_set_wedged(gt);
- err = -ETIME;
- }
- i915_request_put(rq);
-
-out:
- st_engine_heartbeat_enable(engine);
- intel_context_put(ce);
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static const char *error_repr(int err)
-{
- return err ? "bad" : "good";
-}
-
-static int live_error_interrupt(void *arg)
-{
- static const struct error_phase {
- enum { GOOD = 0, BAD = -EIO } error[2];
- } phases[] = {
- { { BAD, GOOD } },
- { { BAD, BAD } },
- { { BAD, GOOD } },
- { { GOOD, GOOD } }, /* sentinel */
- };
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /*
- * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
- * of invalid commands in user batches that will cause a GPU hang.
- * This is a faster mechanism than using hangcheck/heartbeats, but
- * only detects problems the HW knows about -- it will not warn when
- * we kill the HW!
- *
- * To verify our detection and reset, we throw some invalid commands
- * at the HW and wait for the interrupt.
- */
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- for_each_engine(engine, gt, id) {
- const struct error_phase *p;
- int err = 0;
-
- st_engine_heartbeat_disable(engine);
-
- for (p = phases; p->error[0] != GOOD; p++) {
- struct i915_request *client[ARRAY_SIZE(phases->error)];
- u32 *cs;
- int i;
-
- memset(client, 0, sizeof(*client));
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- if (rq->engine->emit_init_breadcrumb) {
- err = rq->engine->emit_init_breadcrumb(rq);
- if (err) {
- i915_request_add(rq);
- goto out;
- }
- }
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- err = PTR_ERR(cs);
- goto out;
- }
-
- if (p->error[i]) {
- *cs++ = 0xdeadbeef;
- *cs++ = 0xdeadbeef;
- } else {
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- }
-
- client[i] = i915_request_get(rq);
- i915_request_add(rq);
- }
-
- err = wait_for_submit(engine, client[0], HZ / 2);
- if (err) {
- pr_err("%s: first request did not start within time!\n",
- engine->name);
- err = -ETIME;
- goto out;
- }
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- if (i915_request_wait(client[i], 0, HZ / 5) < 0)
- pr_debug("%s: %s request incomplete!\n",
- engine->name,
- error_repr(p->error[i]));
-
- if (!i915_request_started(client[i])) {
- pr_err("%s: %s request not started!\n",
- engine->name,
- error_repr(p->error[i]));
- err = -ETIME;
- goto out;
- }
-
- /* Kick the tasklet to process the error */
- intel_engine_flush_submission(engine);
- if (client[i]->fence.error != p->error[i]) {
- pr_err("%s: %s request (%s) with wrong error code: %d\n",
- engine->name,
- error_repr(p->error[i]),
- i915_request_completed(client[i]) ? "completed" : "running",
- client[i]->fence.error);
- err = -EINVAL;
- goto out;
- }
- }
-
-out:
- for (i = 0; i < ARRAY_SIZE(client); i++)
- if (client[i])
- i915_request_put(client[i]);
- if (err) {
- pr_err("%s: failed at phase[%zd] { %d, %d }\n",
- engine->name, p - phases,
- p->error[0], p->error[1]);
- break;
- }
- }
-
- st_engine_heartbeat_enable(engine);
- if (err) {
- intel_gt_set_wedged(gt);
- return err;
- }
- }
-
- return 0;
-}
-
-static int
-emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 10);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_NEQ_SDD;
- *cs++ = 0;
- *cs++ = i915_ggtt_offset(vma) + 4 * idx;
- *cs++ = 0;
-
- if (idx > 0) {
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
- *cs++ = 0;
- *cs++ = 1;
- } else {
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- }
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-
- intel_ring_advance(rq, cs);
- return 0;
-}
-
-static struct i915_request *
-semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
-{
- struct intel_context *ce;
- struct i915_request *rq;
- int err;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq))
- goto out_ce;
-
- err = 0;
- if (rq->engine->emit_init_breadcrumb)
- err = rq->engine->emit_init_breadcrumb(rq);
- if (err == 0)
- err = emit_semaphore_chain(rq, vma, idx);
- if (err == 0)
- i915_request_get(rq);
- i915_request_add(rq);
- if (err)
- rq = ERR_PTR(err);
-
-out_ce:
- intel_context_put(ce);
- return rq;
-}
-
-static int
-release_queue(struct intel_engine_cs *engine,
- struct i915_vma *vma,
- int idx, int prio)
-{
- struct i915_sched_attr attr = {
- .priority = prio,
- };
- struct i915_request *rq;
- u32 *cs;
-
- rq = intel_engine_create_kernel_request(engine);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
- *cs++ = 0;
- *cs++ = 1;
-
- intel_ring_advance(rq, cs);
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- local_bh_disable();
- engine->schedule(rq, &attr);
- local_bh_enable(); /* kick tasklet */
-
- i915_request_put(rq);
-
- return 0;
-}
-
-static int
-slice_semaphore_queue(struct intel_engine_cs *outer,
- struct i915_vma *vma,
- int count)
-{
- struct intel_engine_cs *engine;
- struct i915_request *head;
- enum intel_engine_id id;
- int err, i, n = 0;
-
- head = semaphore_queue(outer, vma, n++);
- if (IS_ERR(head))
- return PTR_ERR(head);
-
- for_each_engine(engine, outer->gt, id) {
- for (i = 0; i < count; i++) {
- struct i915_request *rq;
-
- rq = semaphore_queue(engine, vma, n++);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_put(rq);
- }
- }
-
- err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
- if (err)
- goto out;
-
- if (i915_request_wait(head, 0,
- 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
- pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
- count, n);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(outer->gt);
- err = -EIO;
- }
-
-out:
- i915_request_put(head);
- return err;
-}
-
-static int live_timeslice_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct i915_vma *vma;
- void *vaddr;
- int err = 0;
-
- /*
- * If a request takes too long, we would like to give other users
- * a fair go on the GPU. In particular, users may create batches
- * that wait upon external input, where that input may even be
- * supplied by another GPU job. To avoid blocking forever, we
- * need to preempt the current task and replace it with another
- * ready task.
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err)
- goto err_map;
-
- err = i915_vma_sync(vma);
- if (err)
- goto err_pin;
-
- for_each_engine(engine, gt, id) {
- if (!intel_engine_has_preemption(engine))
- continue;
-
- memset(vaddr, 0, PAGE_SIZE);
-
- st_engine_heartbeat_disable(engine);
- err = slice_semaphore_queue(engine, vma, 5);
- st_engine_heartbeat_enable(engine);
- if (err)
- goto err_pin;
-
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- goto err_pin;
- }
- }
-
-err_pin:
- i915_vma_unpin(vma);
-err_map:
- i915_gem_object_unpin_map(obj);
-err_obj:
- i915_gem_object_put(obj);
- return err;
-}
-
-static struct i915_request *
-create_rewinder(struct intel_context *ce,
- struct i915_request *wait,
- void *slot, int idx)
-{
- const u32 offset =
- i915_ggtt_offset(ce->engine->status_page.vma) +
- offset_in_page(slot);
- struct i915_request *rq;
- u32 *cs;
- int err;
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq))
- return rq;
-
- if (wait) {
- err = i915_request_await_dma_fence(rq, &wait->fence);
- if (err)
- goto err;
- }
-
- cs = intel_ring_begin(rq, 14);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto err;
- }
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- *cs++ = MI_NOOP;
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_GTE_SDD;
- *cs++ = idx;
- *cs++ = offset;
- *cs++ = 0;
-
- *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
- *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
- *cs++ = offset + idx * sizeof(u32);
- *cs++ = 0;
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = offset;
- *cs++ = 0;
- *cs++ = idx + 1;
-
- intel_ring_advance(rq, cs);
-
- rq->sched.attr.priority = I915_PRIORITY_MASK;
- err = 0;
-err:
- i915_request_get(rq);
- i915_request_add(rq);
- if (err) {
- i915_request_put(rq);
- return ERR_PTR(err);
- }
-
- return rq;
-}
-
-static int live_timeslice_rewind(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /*
- * The usual presumption on timeslice expiration is that we replace
- * the active context with another. However, given a chain of
- * dependencies we may end up with replacing the context with itself,
- * but only a few of those requests, forcing us to rewind the
- * RING_TAIL of the original request.
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- for_each_engine(engine, gt, id) {
- enum { A1, A2, B1 };
- enum { X = 1, Z, Y };
- struct i915_request *rq[3] = {};
- struct intel_context *ce;
- unsigned long timeslice;
- int i, err = 0;
- u32 *slot;
-
- if (!intel_engine_has_timeslices(engine))
- continue;
-
- /*
- * A:rq1 -- semaphore wait, timestamp X
- * A:rq2 -- write timestamp Y
- *
- * B:rq1 [await A:rq1] -- write timestamp Z
- *
- * Force timeslice, release semaphore.
- *
- * Expect execution/evaluation order XZY
- */
-
- st_engine_heartbeat_disable(engine);
- timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
-
- slot = memset32(engine->status_page.addr + 1000, 0, 4);
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto err;
- }
-
- rq[A1] = create_rewinder(ce, NULL, slot, X);
- if (IS_ERR(rq[A1])) {
- intel_context_put(ce);
- goto err;
- }
-
- rq[A2] = create_rewinder(ce, NULL, slot, Y);
- intel_context_put(ce);
- if (IS_ERR(rq[A2]))
- goto err;
-
- err = wait_for_submit(engine, rq[A2], HZ / 2);
- if (err) {
- pr_err("%s: failed to submit first context\n",
- engine->name);
- goto err;
- }
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto err;
- }
-
- rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
- intel_context_put(ce);
- if (IS_ERR(rq[2]))
- goto err;
-
- err = wait_for_submit(engine, rq[B1], HZ / 2);
- if (err) {
- pr_err("%s: failed to submit second context\n",
- engine->name);
- goto err;
- }
-
- /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
- ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
- if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */
- /* Wait for the timeslice to kick in */
- del_timer(&engine->execlists.timer);
- tasklet_hi_schedule(&engine->execlists.tasklet);
- intel_engine_flush_submission(engine);
- }
- /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
- GEM_BUG_ON(!i915_request_is_active(rq[A1]));
- GEM_BUG_ON(!i915_request_is_active(rq[B1]));
- GEM_BUG_ON(i915_request_is_active(rq[A2]));
-
- /* Release the hounds! */
- slot[0] = 1;
- wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
-
- for (i = 1; i <= 3; i++) {
- unsigned long timeout = jiffies + HZ / 2;
-
- while (!READ_ONCE(slot[i]) &&
- time_before(jiffies, timeout))
- ;
-
- if (!time_before(jiffies, timeout)) {
- pr_err("%s: rq[%d] timed out\n",
- engine->name, i - 1);
- err = -ETIME;
- goto err;
- }
-
- pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
- }
-
- /* XZY: XZ < XY */
- if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
- pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
- engine->name,
- slot[Z] - slot[X],
- slot[Y] - slot[X]);
- err = -EINVAL;
- }
-
-err:
- memset32(&slot[0], -1, 4);
- wmb();
-
- engine->props.timeslice_duration_ms = timeslice;
- st_engine_heartbeat_enable(engine);
- for (i = 0; i < 3; i++)
- i915_request_put(rq[i]);
- if (igt_flush_test(gt->i915))
- err = -EIO;
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static struct i915_request *nop_request(struct intel_engine_cs *engine)
-{
- struct i915_request *rq;
-
- rq = intel_engine_create_kernel_request(engine);
- if (IS_ERR(rq))
- return rq;
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- return rq;
-}
-
-static long slice_timeout(struct intel_engine_cs *engine)
-{
- long timeout;
-
- /* Enough time for a timeslice to kick in, and kick out */
- timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
-
- /* Enough time for the nop request to complete */
- timeout += HZ / 5;
-
- return timeout + 1;
-}
-
-static int live_timeslice_queue(void *arg)
-{
- struct intel_gt *gt = arg;
- struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct i915_vma *vma;
- void *vaddr;
- int err = 0;
-
- /*
- * Make sure that even if ELSP[0] and ELSP[1] are filled with
- * timeslicing between them disabled, we *do* enable timeslicing
- * if the queue demands it. (Normally, we do not submit if
- * ELSP[1] is already occupied, so must rely on timeslicing to
- * eject ELSP[0] in favour of the queue.)
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err)
- goto err_map;
-
- err = i915_vma_sync(vma);
- if (err)
- goto err_pin;
-
- for_each_engine(engine, gt, id) {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
- };
- struct i915_request *rq, *nop;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- st_engine_heartbeat_disable(engine);
- memset(vaddr, 0, PAGE_SIZE);
-
- /* ELSP[0]: semaphore wait */
- rq = semaphore_queue(engine, vma, 0);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_heartbeat;
- }
- engine->schedule(rq, &attr);
- err = wait_for_submit(engine, rq, HZ / 2);
- if (err) {
- pr_err("%s: Timed out trying to submit semaphores\n",
- engine->name);
- goto err_rq;
- }
-
- /* ELSP[1]: nop request */
- nop = nop_request(engine);
- if (IS_ERR(nop)) {
- err = PTR_ERR(nop);
- goto err_rq;
- }
- err = wait_for_submit(engine, nop, HZ / 2);
- i915_request_put(nop);
- if (err) {
- pr_err("%s: Timed out trying to submit nop\n",
- engine->name);
- goto err_rq;
- }
-
- GEM_BUG_ON(i915_request_completed(rq));
- GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
- /* Queue: semaphore signal, matching priority as semaphore */
- err = release_queue(engine, vma, 1, effective_prio(rq));
- if (err)
- goto err_rq;
-
- /* Wait until we ack the release_queue and start timeslicing */
- do {
- cond_resched();
- intel_engine_flush_submission(engine);
- } while (READ_ONCE(engine->execlists.pending[0]));
-
- /* Timeslice every jiffy, so within 2 we should signal */
- if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
- struct drm_printer p =
- drm_info_printer(gt->i915->drm.dev);
-
- pr_err("%s: Failed to timeslice into queue\n",
- engine->name);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- memset(vaddr, 0xff, PAGE_SIZE);
- err = -EIO;
- }
-err_rq:
- i915_request_put(rq);
-err_heartbeat:
- st_engine_heartbeat_enable(engine);
- if (err)
- break;
- }
-
-err_pin:
- i915_vma_unpin(vma);
-err_map:
- i915_gem_object_unpin_map(obj);
-err_obj:
- i915_gem_object_put(obj);
- return err;
-}
-
-static int live_timeslice_nopreempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = 0;
-
- /*
- * We should not timeslice into a request that is marked with
-	 * I915_FENCE_FLAG_NOPREEMPT.
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
- unsigned long timeslice;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- st_engine_heartbeat_disable(engine);
- timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
-
- /* Create an unpreemptible spinner */
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_heartbeat;
- }
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- i915_request_put(rq);
- err = -ETIME;
- goto out_spin;
- }
-
- set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
- i915_request_put(rq);
-
- /* Followed by a maximum priority barrier (heartbeat) */
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out_spin;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_spin;
- }
-
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_get(rq);
- i915_request_add(rq);
-
- /*
- * Wait until the barrier is in ELSP, and we know timeslicing
- * will have been activated.
- */
- if (wait_for_submit(engine, rq, HZ / 2)) {
- i915_request_put(rq);
- err = -ETIME;
- goto out_spin;
- }
-
- /*
- * Since the ELSP[0] request is unpreemptible, it should not
- * allow the maximum priority barrier through. Wait long
- * enough to see if it is timesliced in by mistake.
- */
- if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
- pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
- engine->name);
- err = -EINVAL;
- }
- i915_request_put(rq);
-
-out_spin:
- igt_spinner_end(&spin);
-out_heartbeat:
- xchg(&engine->props.timeslice_duration_ms, timeslice);
- st_engine_heartbeat_enable(engine);
- if (err)
- break;
-
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- break;
- }
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_busywait_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct intel_engine_cs *engine;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- enum intel_engine_id id;
- int err = -ENOMEM;
- u32 *map;
-
- /*
- * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
- * preempt the busywaits used to synchronise between rings.
- */
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- return -ENOMEM;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_ctx_lo;
- }
-
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(map)) {
- err = PTR_ERR(map);
- goto err_obj;
- }
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_map;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err)
- goto err_map;
-
- err = i915_vma_sync(vma);
- if (err)
- goto err_vma;
-
- for_each_engine(engine, gt, id) {
- struct i915_request *lo, *hi;
- struct igt_live_test t;
- u32 *cs;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_vma;
- }
-
- /*
- * We create two requests. The low priority request
-		 * busywaits on a semaphore (inside the ringbuffer where
-		 * it should be preemptible) and the high priority request
-		 * uses an MI_STORE_DWORD_IMM to update the semaphore value
- * allowing the first request to complete. If preemption
- * fails, we hang instead.
- */
-
- lo = igt_request_alloc(ctx_lo, engine);
- if (IS_ERR(lo)) {
- err = PTR_ERR(lo);
- goto err_vma;
- }
-
- cs = intel_ring_begin(lo, 8);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- i915_request_add(lo);
- goto err_vma;
- }
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma);
- *cs++ = 0;
- *cs++ = 1;
-
- /* XXX Do we need a flush + invalidate here? */
-
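-		/*
-		 * MI_SEMAPHORE_WAIT with SAD_EQ_SDD polls the dword at the
-		 * GGTT address below until it equals the inline data (0);
-		 * the high priority request later clears that dword to
-		 * release us.
-		 */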
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = i915_ggtt_offset(vma);
- *cs++ = 0;
-
- intel_ring_advance(lo, cs);
-
- i915_request_get(lo);
- i915_request_add(lo);
-
- if (wait_for(READ_ONCE(*map), 10)) {
- i915_request_put(lo);
- err = -ETIMEDOUT;
- goto err_vma;
- }
-
- /* Low priority request should be busywaiting now */
- if (i915_request_wait(lo, 0, 1) != -ETIME) {
- i915_request_put(lo);
-			pr_err("%s: Busywaiting request did not busywait!\n",
- engine->name);
- err = -EIO;
- goto err_vma;
- }
-
- hi = igt_request_alloc(ctx_hi, engine);
- if (IS_ERR(hi)) {
- err = PTR_ERR(hi);
- i915_request_put(lo);
- goto err_vma;
- }
-
- cs = intel_ring_begin(hi, 4);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- i915_request_add(hi);
- i915_request_put(lo);
- goto err_vma;
- }
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma);
- *cs++ = 0;
- *cs++ = 0;
-
- intel_ring_advance(hi, cs);
- i915_request_add(hi);
-
- if (i915_request_wait(lo, 0, HZ / 5) < 0) {
- struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
-
- pr_err("%s: Failed to preempt semaphore busywait!\n",
- engine->name);
-
- intel_engine_dump(engine, &p, "%s\n", engine->name);
- GEM_TRACE_DUMP();
-
- i915_request_put(lo);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_vma;
- }
- GEM_BUG_ON(READ_ONCE(*map));
- i915_request_put(lo);
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_vma;
- }
- }
-
- err = 0;
-err_vma:
- i915_vma_unpin(vma);
-err_map:
- i915_gem_object_unpin_map(obj);
-err_obj:
- i915_gem_object_put(obj);
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
- return err;
-}
-
-static struct i915_request *
-spinner_create_request(struct igt_spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u32 arb)
-{
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- rq = igt_spinner_create_request(spin, ce, arb);
- intel_context_put(ce);
- return rq;
-}
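-
-/*
- * Note: the 'arb' argument selects the command emitted inside the spin
- * batch: MI_ARB_CHECK leaves an arbitration point so the spinner can be
- * preempted mid-batch, whereas MI_NOOP makes the spin non-preemptible.
- */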
-
-static int live_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct igt_spinner spin_hi, spin_lo;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
- pr_err("Logical preemption supported, but not exposed\n");
-
- if (igt_spinner_init(&spin_hi, gt))
- return -ENOMEM;
-
- if (igt_spinner_init(&spin_lo, gt))
- goto err_spin_hi;
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- goto err_spin_lo;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
- for_each_engine(engine, gt, id) {
- struct igt_live_test t;
- struct i915_request *rq;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_lo, rq)) {
- GEM_TRACE("lo spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- igt_spinner_end(&spin_lo);
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_hi, rq)) {
- GEM_TRACE("hi spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- igt_spinner_end(&spin_hi);
- igt_spinner_end(&spin_lo);
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_ctx_lo;
- }
- }
-
- err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
-err_spin_hi:
- igt_spinner_fini(&spin_hi);
- return err;
-}
-
-static int live_late_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct igt_spinner spin_hi, spin_lo;
- struct intel_engine_cs *engine;
- struct i915_sched_attr attr = {};
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (igt_spinner_init(&spin_hi, gt))
- return -ENOMEM;
-
- if (igt_spinner_init(&spin_lo, gt))
- goto err_spin_hi;
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- goto err_spin_lo;
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
-
- /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
- ctx_lo->sched.priority = I915_USER_PRIORITY(1);
-
- for_each_engine(engine, gt, id) {
- struct igt_live_test t;
- struct i915_request *rq;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_lo, rq)) {
- pr_err("First context failed to start\n");
- goto err_wedged;
- }
-
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_NOOP);
- if (IS_ERR(rq)) {
- igt_spinner_end(&spin_lo);
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (igt_wait_for_spinner(&spin_hi, rq)) {
- pr_err("Second context overtook first?\n");
- goto err_wedged;
- }
-
- attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
- engine->schedule(rq, &attr);
-
- if (!igt_wait_for_spinner(&spin_hi, rq)) {
- pr_err("High priority context failed to preempt the low priority context\n");
- GEM_TRACE_DUMP();
- goto err_wedged;
- }
-
- igt_spinner_end(&spin_hi);
- igt_spinner_end(&spin_lo);
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_ctx_lo;
- }
- }
-
- err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
-err_spin_hi:
- igt_spinner_fini(&spin_hi);
- return err;
-
-err_wedged:
- igt_spinner_end(&spin_hi);
- igt_spinner_end(&spin_lo);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
-}
-
-struct preempt_client {
- struct igt_spinner spin;
- struct i915_gem_context *ctx;
-};
-
-static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
-{
- c->ctx = kernel_context(gt->i915);
- if (!c->ctx)
- return -ENOMEM;
-
- if (igt_spinner_init(&c->spin, gt))
- goto err_ctx;
-
- return 0;
-
-err_ctx:
- kernel_context_close(c->ctx);
- return -ENOMEM;
-}
-
-static void preempt_client_fini(struct preempt_client *c)
-{
- igt_spinner_fini(&c->spin);
- kernel_context_close(c->ctx);
-}
-
-static int live_nopreempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct preempt_client a, b;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Verify that we can disable preemption for an individual request
-	 * that is being observed and does not want to be interrupted.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (preempt_client_init(gt, &a))
- return -ENOMEM;
- if (preempt_client_init(gt, &b))
- goto err_client_a;
- b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
-
- for_each_engine(engine, gt, id) {
- struct i915_request *rq_a, *rq_b;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- engine->execlists.preempt_hang.count = 0;
-
- rq_a = spinner_create_request(&a.spin,
- a.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq_a)) {
- err = PTR_ERR(rq_a);
- goto err_client_b;
- }
-
- /* Low priority client, but unpreemptable! */
- __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
-
- i915_request_add(rq_a);
- if (!igt_wait_for_spinner(&a.spin, rq_a)) {
- pr_err("First client failed to start\n");
- goto err_wedged;
- }
-
- rq_b = spinner_create_request(&b.spin,
- b.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq_b)) {
- err = PTR_ERR(rq_b);
- goto err_client_b;
- }
-
- i915_request_add(rq_b);
-
- /* B is much more important than A! (But A is unpreemptable.) */
- GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
-
- /* Wait long enough for preemption and timeslicing */
- if (igt_wait_for_spinner(&b.spin, rq_b)) {
- pr_err("Second client started too early!\n");
- goto err_wedged;
- }
-
- igt_spinner_end(&a.spin);
-
- if (!igt_wait_for_spinner(&b.spin, rq_b)) {
- pr_err("Second client failed to start\n");
- goto err_wedged;
- }
-
- igt_spinner_end(&b.spin);
-
- if (engine->execlists.preempt_hang.count) {
- pr_err("Preemption recorded x%d; should have been suppressed!\n",
- engine->execlists.preempt_hang.count);
- err = -EINVAL;
- goto err_wedged;
- }
-
- if (igt_flush_test(gt->i915))
- goto err_wedged;
- }
-
- err = 0;
-err_client_b:
- preempt_client_fini(&b);
-err_client_a:
- preempt_client_fini(&a);
- return err;
-
-err_wedged:
- igt_spinner_end(&b.spin);
- igt_spinner_end(&a.spin);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_client_b;
-}
-
-struct live_preempt_cancel {
- struct intel_engine_cs *engine;
- struct preempt_client a, b;
-};
-
-static int __cancel_active0(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq;
- struct igt_live_test t;
- int err;
-
- /* Preempt cancel of ELSP0 */
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- if (igt_live_test_begin(&t, arg->engine->i915,
- __func__, arg->engine->name))
- return -EIO;
-
- rq = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- clear_bit(CONTEXT_BANNED, &rq->context->flags);
- i915_request_get(rq);
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
- err = -EIO;
- goto out;
- }
-
- intel_context_set_banned(rq->context);
- err = intel_engine_pulse(arg->engine);
- if (err)
- goto out;
-
- err = wait_for_reset(arg->engine, rq, HZ / 2);
- if (err) {
- pr_err("Cancelled inflight0 request did not reset\n");
- goto out;
- }
-
-out:
- i915_request_put(rq);
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
-
-static int __cancel_active1(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq[2] = {};
- struct igt_live_test t;
- int err;
-
- /* Preempt cancel of ELSP1 */
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- if (igt_live_test_begin(&t, arg->engine->i915,
- __func__, arg->engine->name))
- return -EIO;
-
- rq[0] = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_NOOP); /* no preemption */
- if (IS_ERR(rq[0]))
- return PTR_ERR(rq[0]);
-
- clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
- err = -EIO;
- goto out;
- }
-
- rq[1] = spinner_create_request(&arg->b.spin,
- arg->b.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq[1])) {
- err = PTR_ERR(rq[1]);
- goto out;
- }
-
- clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
- i915_request_get(rq[1]);
- err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
- i915_request_add(rq[1]);
- if (err)
- goto out;
-
- intel_context_set_banned(rq[1]->context);
- err = intel_engine_pulse(arg->engine);
- if (err)
- goto out;
-
- igt_spinner_end(&arg->a.spin);
- err = wait_for_reset(arg->engine, rq[1], HZ / 2);
- if (err)
- goto out;
-
- if (rq[0]->fence.error != 0) {
- pr_err("Normal inflight0 request did not complete\n");
- err = -EINVAL;
- goto out;
- }
-
- if (rq[1]->fence.error != -EIO) {
- pr_err("Cancelled inflight1 request did not report -EIO\n");
- err = -EINVAL;
- goto out;
- }
-
-out:
- i915_request_put(rq[1]);
- i915_request_put(rq[0]);
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
-
-static int __cancel_queued(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq[3] = {};
- struct igt_live_test t;
- int err;
-
- /* Full ELSP and one in the wings */
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- if (igt_live_test_begin(&t, arg->engine->i915,
- __func__, arg->engine->name))
- return -EIO;
-
- rq[0] = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq[0]))
- return PTR_ERR(rq[0]);
-
- clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
- err = -EIO;
- goto out;
- }
-
- rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
- if (IS_ERR(rq[1])) {
- err = PTR_ERR(rq[1]);
- goto out;
- }
-
- clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
- i915_request_get(rq[1]);
- err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
- i915_request_add(rq[1]);
- if (err)
- goto out;
-
- rq[2] = spinner_create_request(&arg->b.spin,
- arg->a.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq[2])) {
- err = PTR_ERR(rq[2]);
- goto out;
- }
-
- i915_request_get(rq[2]);
- err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
- i915_request_add(rq[2]);
- if (err)
- goto out;
-
- intel_context_set_banned(rq[2]->context);
- err = intel_engine_pulse(arg->engine);
- if (err)
- goto out;
-
- err = wait_for_reset(arg->engine, rq[2], HZ / 2);
- if (err)
- goto out;
-
- if (rq[0]->fence.error != -EIO) {
- pr_err("Cancelled inflight0 request did not report -EIO\n");
- err = -EINVAL;
- goto out;
- }
-
- if (rq[1]->fence.error != 0) {
- pr_err("Normal inflight1 request did not complete\n");
- err = -EINVAL;
- goto out;
- }
-
- if (rq[2]->fence.error != -EIO) {
- pr_err("Cancelled queued request did not report -EIO\n");
- err = -EINVAL;
- goto out;
- }
-
-out:
- i915_request_put(rq[2]);
- i915_request_put(rq[1]);
- i915_request_put(rq[0]);
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
-
-static int __cancel_hostile(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq;
- int err;
-
- /* Preempt cancel non-preemptible spinner in ELSP0 */
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
- return 0;
-
- if (!intel_has_reset_engine(arg->engine->gt))
- return 0;
-
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- rq = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_NOOP); /* preemption disabled */
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- clear_bit(CONTEXT_BANNED, &rq->context->flags);
- i915_request_get(rq);
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
- err = -EIO;
- goto out;
- }
-
- intel_context_set_banned(rq->context);
- err = intel_engine_pulse(arg->engine); /* force reset */
- if (err)
- goto out;
-
- err = wait_for_reset(arg->engine, rq, HZ / 2);
- if (err) {
- pr_err("Cancelled inflight0 request did not reset\n");
- goto out;
- }
-
-out:
- i915_request_put(rq);
- if (igt_flush_test(arg->engine->i915))
- err = -EIO;
- return err;
-}
-
-static int live_preempt_cancel(void *arg)
-{
- struct intel_gt *gt = arg;
- struct live_preempt_cancel data;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * To cancel an inflight context, we need to first remove it from the
- * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
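-	 *
-	 * Each __cancel_*() helper below bans the victim context and sends a
-	 * heartbeat pulse (intel_engine_pulse) to force the preemption that
-	 * evicts it, then checks that the cancelled request reports -EIO
-	 * while innocent requests on the same engine complete normally.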
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (preempt_client_init(gt, &data.a))
- return -ENOMEM;
- if (preempt_client_init(gt, &data.b))
- goto err_client_a;
-
- for_each_engine(data.engine, gt, id) {
- if (!intel_engine_has_preemption(data.engine))
- continue;
-
- err = __cancel_active0(&data);
- if (err)
- goto err_wedged;
-
- err = __cancel_active1(&data);
- if (err)
- goto err_wedged;
-
- err = __cancel_queued(&data);
- if (err)
- goto err_wedged;
-
- err = __cancel_hostile(&data);
- if (err)
- goto err_wedged;
- }
-
- err = 0;
-err_client_b:
- preempt_client_fini(&data.b);
-err_client_a:
- preempt_client_fini(&data.a);
- return err;
-
-err_wedged:
- GEM_TRACE_DUMP();
- igt_spinner_end(&data.b.spin);
- igt_spinner_end(&data.a.spin);
- intel_gt_set_wedged(gt);
- goto err_client_b;
-}
-
-static int live_suppress_self_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
- };
- struct preempt_client a, b;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Verify that if a preemption request does not cause a change in
- * the current execution order, the preempt-to-idle injection is
- * skipped and that we do not accidentally apply it after the CS
- * completion event.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
-		return 0; /* presume a black box */
-
- if (intel_vgpu_active(gt->i915))
- return 0; /* GVT forces single port & request submission */
-
- if (preempt_client_init(gt, &a))
- return -ENOMEM;
- if (preempt_client_init(gt, &b))
- goto err_client_a;
-
- for_each_engine(engine, gt, id) {
- struct i915_request *rq_a, *rq_b;
- int depth;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_flush_test(gt->i915))
- goto err_wedged;
-
- st_engine_heartbeat_disable(engine);
- engine->execlists.preempt_hang.count = 0;
-
- rq_a = spinner_create_request(&a.spin,
- a.ctx, engine,
- MI_NOOP);
- if (IS_ERR(rq_a)) {
- err = PTR_ERR(rq_a);
- st_engine_heartbeat_enable(engine);
- goto err_client_b;
- }
-
- i915_request_add(rq_a);
- if (!igt_wait_for_spinner(&a.spin, rq_a)) {
- pr_err("First client failed to start\n");
- st_engine_heartbeat_enable(engine);
- goto err_wedged;
- }
-
- /* Keep postponing the timer to avoid premature slicing */
- mod_timer(&engine->execlists.timer, jiffies + HZ);
- for (depth = 0; depth < 8; depth++) {
- rq_b = spinner_create_request(&b.spin,
- b.ctx, engine,
- MI_NOOP);
- if (IS_ERR(rq_b)) {
- err = PTR_ERR(rq_b);
- st_engine_heartbeat_enable(engine);
- goto err_client_b;
- }
- i915_request_add(rq_b);
-
- GEM_BUG_ON(i915_request_completed(rq_a));
- engine->schedule(rq_a, &attr);
- igt_spinner_end(&a.spin);
-
- if (!igt_wait_for_spinner(&b.spin, rq_b)) {
- pr_err("Second client failed to start\n");
- st_engine_heartbeat_enable(engine);
- goto err_wedged;
- }
-
- swap(a, b);
- rq_a = rq_b;
- }
- igt_spinner_end(&a.spin);
-
- if (engine->execlists.preempt_hang.count) {
- pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
- engine->name,
- engine->execlists.preempt_hang.count,
- depth);
- st_engine_heartbeat_enable(engine);
- err = -EINVAL;
- goto err_client_b;
- }
-
- st_engine_heartbeat_enable(engine);
- if (igt_flush_test(gt->i915))
- goto err_wedged;
- }
-
- err = 0;
-err_client_b:
- preempt_client_fini(&b);
-err_client_a:
- preempt_client_fini(&a);
- return err;
-
-err_wedged:
- igt_spinner_end(&b.spin);
- igt_spinner_end(&a.spin);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_client_b;
-}
-
-static int live_chain_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct preempt_client hi, lo;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Build a chain AB...BA between two contexts (A, B) and request
- * preemption of the last request. It should then complete before
- * the previously submitted spinner in B.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (preempt_client_init(gt, &hi))
- return -ENOMEM;
-
- if (preempt_client_init(gt, &lo))
- goto err_client_hi;
-
- for_each_engine(engine, gt, id) {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
- };
- struct igt_live_test t;
- struct i915_request *rq;
- int ring_size, count, i;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- rq = spinner_create_request(&lo.spin,
- lo.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- goto err_wedged;
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- ring_size = rq->wa_tail - rq->head;
- if (ring_size < 0)
- ring_size += rq->ring->size;
- ring_size = rq->ring->size / ring_size;
- pr_debug("%s(%s): Using maximum of %d requests\n",
- __func__, engine->name, ring_size);
-
- igt_spinner_end(&lo.spin);
- if (i915_request_wait(rq, 0, HZ / 2) < 0) {
- pr_err("Timed out waiting to flush %s\n", engine->name);
- i915_request_put(rq);
- goto err_wedged;
- }
- i915_request_put(rq);
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_wedged;
- }
-
- for_each_prime_number_from(count, 1, ring_size) {
- rq = spinner_create_request(&hi.spin,
- hi.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- goto err_wedged;
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&hi.spin, rq))
- goto err_wedged;
-
- rq = spinner_create_request(&lo.spin,
- lo.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- goto err_wedged;
- i915_request_add(rq);
-
- for (i = 0; i < count; i++) {
- rq = igt_request_alloc(lo.ctx, engine);
- if (IS_ERR(rq))
- goto err_wedged;
- i915_request_add(rq);
- }
-
- rq = igt_request_alloc(hi.ctx, engine);
- if (IS_ERR(rq))
- goto err_wedged;
-
- i915_request_get(rq);
- i915_request_add(rq);
- engine->schedule(rq, &attr);
-
- igt_spinner_end(&hi.spin);
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- struct drm_printer p =
- drm_info_printer(gt->i915->drm.dev);
-
- pr_err("Failed to preempt over chain of %d\n",
- count);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
- i915_request_put(rq);
- goto err_wedged;
- }
- igt_spinner_end(&lo.spin);
- i915_request_put(rq);
-
- rq = igt_request_alloc(lo.ctx, engine);
- if (IS_ERR(rq))
- goto err_wedged;
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- struct drm_printer p =
- drm_info_printer(gt->i915->drm.dev);
-
- pr_err("Failed to flush low priority chain of %d requests\n",
- count);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- i915_request_put(rq);
- goto err_wedged;
- }
- i915_request_put(rq);
- }
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_wedged;
- }
- }
-
- err = 0;
-err_client_lo:
- preempt_client_fini(&lo);
-err_client_hi:
- preempt_client_fini(&hi);
- return err;
-
-err_wedged:
- igt_spinner_end(&hi.spin);
- igt_spinner_end(&lo.spin);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_client_lo;
-}
-
-static int create_gang(struct intel_engine_cs *engine,
- struct i915_request **prev)
-{
- struct drm_i915_gem_object *obj;
- struct intel_context *ce;
- struct i915_request *rq;
- struct i915_vma *vma;
- u32 *cs;
- int err;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce))
- return PTR_ERR(ce);
-
- obj = i915_gem_object_create_internal(engine->i915, 4096);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_ce;
- }
-
- vma = i915_vma_instance(obj, ce->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err_obj;
-
- cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
-	if (IS_ERR(cs)) {
-		err = PTR_ERR(cs);
-		goto err_obj;
-	}
-
- /* Semaphore target: spin until zero */
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = lower_32_bits(vma->node.start);
- *cs++ = upper_32_bits(vma->node.start);
-
- if (*prev) {
- u64 offset = (*prev)->batch->node.start;
-
- /* Terminate the spinner in the next lower priority batch. */
- *cs++ = MI_STORE_DWORD_IMM_GEN4;
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
- *cs++ = 0;
- }
-
- *cs++ = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- rq = intel_context_create_request(ce);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto err_obj;
-	}
-
- rq->batch = i915_vma_get(vma);
- i915_request_get(rq);
-
- i915_vma_lock(vma);
- err = i915_request_await_object(rq, vma->obj, false);
- if (!err)
- err = i915_vma_move_to_active(vma, rq, 0);
- if (!err)
- err = rq->engine->emit_bb_start(rq,
- vma->node.start,
- PAGE_SIZE, 0);
- i915_vma_unlock(vma);
- i915_request_add(rq);
- if (err)
- goto err_rq;
-
- i915_gem_object_put(obj);
- intel_context_put(ce);
-
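-	/*
-	 * Chain the requests via the (otherwise unused) mock.link so that
-	 * live_preempt_gang() can walk the gang from newest to oldest.
-	 */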
- rq->mock.link.next = &(*prev)->mock.link;
- *prev = rq;
- return 0;
-
-err_rq:
- i915_vma_put(rq->batch);
- i915_request_put(rq);
-err_obj:
- i915_gem_object_put(obj);
-err_ce:
- intel_context_put(ce);
- return err;
-}
-
-static int __live_preempt_ring(struct intel_engine_cs *engine,
- struct igt_spinner *spin,
- int queue_sz, int ring_sz)
-{
- struct intel_context *ce[2] = {};
- struct i915_request *rq;
- struct igt_live_test t;
- int err = 0;
- int n;
-
- if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
- return -EIO;
-
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- struct intel_context *tmp;
-
- tmp = intel_context_create(engine);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- goto err_ce;
- }
-
- tmp->ring = __intel_context_ring_size(ring_sz);
-
- err = intel_context_pin(tmp);
- if (err) {
- intel_context_put(tmp);
- goto err_ce;
- }
-
- memset32(tmp->ring->vaddr,
- 0xdeadbeef, /* trigger a hang if executed */
- tmp->ring->vma->size / sizeof(u32));
-
- ce[n] = tmp;
- }
-
- rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- i915_request_get(rq);
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(spin, rq)) {
- intel_gt_set_wedged(engine->gt);
- i915_request_put(rq);
- err = -ETIME;
- goto err_ce;
- }
-
-	/* Fill the ring until we cause a wrap */
- n = 0;
- while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
- struct i915_request *tmp;
-
- tmp = intel_context_create_request(ce[0]);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- i915_request_put(rq);
- goto err_ce;
- }
-
- i915_request_add(tmp);
- intel_engine_flush_submission(engine);
- n++;
- }
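-	/*
-	 * The nops above push ce[0]'s ring tail towards a wrap; the high
-	 * priority request from ce[1] below should force that partially
-	 * submitted ring to be unwound, and the 0xdeadbeef poison catches
-	 * any execution of ring space we never wrote.
-	 */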
- intel_engine_flush_submission(engine);
- pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
- engine->name, queue_sz, n,
- ce[0]->ring->size,
- ce[0]->ring->tail,
- ce[0]->ring->emit,
- rq->tail);
- i915_request_put(rq);
-
- /* Create a second request to preempt the first ring */
- rq = intel_context_create_request(ce[1]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_get(rq);
- i915_request_add(rq);
-
- err = wait_for_submit(engine, rq, HZ / 2);
- i915_request_put(rq);
- if (err) {
-		pr_err("%s: preemption request was not submitted\n",
- engine->name);
- err = -ETIME;
- }
-
- pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
- engine->name,
- ce[0]->ring->tail, ce[0]->ring->emit,
- ce[1]->ring->tail, ce[1]->ring->emit);
-
-err_ce:
- intel_engine_flush_submission(engine);
- igt_spinner_end(spin);
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- if (IS_ERR_OR_NULL(ce[n]))
- break;
-
- intel_context_unpin(ce[n]);
- intel_context_put(ce[n]);
- }
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
-
-static int live_preempt_ring(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct igt_spinner spin;
- enum intel_engine_id id;
- int err = 0;
-
- /*
- * Check that we rollback large chunks of a ring in order to do a
- * preemption event. Similar to live_unlite_ring, but looking at
- * ring size rather than the impact of intel_ring_direction().
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- int n;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- st_engine_heartbeat_disable(engine);
-
- for (n = 0; n <= 3; n++) {
- err = __live_preempt_ring(engine, &spin,
- n * SZ_4K / 4, SZ_4K);
- if (err)
- break;
- }
-
- st_engine_heartbeat_enable(engine);
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_preempt_gang(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- /*
- * Build as long a chain of preempters as we can, with each
- * request higher priority than the last. Once we are ready, we release
-	 * the last batch which then percolates down the chain, each releasing
- * the next oldest in turn. The intent is to simply push as hard as we
- * can with the number of preemptions, trying to exceed narrow HW
- * limits. At a minimum, we insist that we can sort all the user
- * high priority levels into execution order.
- */
-
- for_each_engine(engine, gt, id) {
- struct i915_request *rq = NULL;
- struct igt_live_test t;
- IGT_TIMEOUT(end_time);
- int prio = 0;
- int err = 0;
- u32 *cs;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
- return -EIO;
-
- do {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(prio++),
- };
-
- err = create_gang(engine, &rq);
- if (err)
- break;
-
- /* Submit each spinner at increasing priority */
- engine->schedule(rq, &attr);
- } while (prio <= I915_PRIORITY_MAX &&
- !__igt_timeout(end_time, NULL));
- pr_debug("%s: Preempt chain of %d requests\n",
- engine->name, prio);
-
- /*
- * Such that the last spinner is the highest priority and
- * should execute first. When that spinner completes,
- * it will terminate the next lowest spinner until there
- * are no more spinners and the gang is complete.
- */
- cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
- if (!IS_ERR(cs)) {
- *cs = 0;
- i915_gem_object_unpin_map(rq->batch->obj);
- } else {
- err = PTR_ERR(cs);
- intel_gt_set_wedged(gt);
- }
-
- while (rq) { /* wait for each rq from highest to lowest prio */
- struct i915_request *n = list_next_entry(rq, mock.link);
-
- if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
- struct drm_printer p =
- drm_info_printer(engine->i915->drm.dev);
-
- pr_err("Failed to flush chain of %d requests, at %d\n",
- prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- err = -ETIME;
- }
-
- i915_vma_put(rq->batch);
- i915_request_put(rq);
- rq = n;
- }
-
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static struct i915_vma *
-create_gpr_user(struct intel_engine_cs *engine,
- struct i915_vma *result,
- unsigned int offset)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- u32 *cs;
- int err;
- int i;
-
- obj = i915_gem_object_create_internal(engine->i915, 4096);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- vma = i915_vma_instance(obj, result->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err) {
- i915_vma_put(vma);
- return ERR_PTR(err);
- }
-
- cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(cs)) {
- i915_vma_put(vma);
- return ERR_CAST(cs);
- }
-
- /* All GPR are clear for new contexts. We use GPR(0) as a constant */
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = CS_GPR(engine, 0);
- *cs++ = 1;
-
- for (i = 1; i < NUM_GPR; i++) {
- u64 addr;
-
- /*
- * Perform: GPR[i]++
- *
- * As we read and write into the context saved GPR[i], if
- * we restart this batch buffer from an earlier point, we
- * will repeat the increment and store a value > 1.
- */
- *cs++ = MI_MATH(4);
- *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
- *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
- *cs++ = MI_MATH_ADD;
- *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
-
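-		/*
-		 * GPRs are 64-bit, stored as pairs of 32-bit CS registers,
-		 * hence the 2 * i below to pick the lower dword of GPR[i].
-		 */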
- addr = result->node.start + offset + i * sizeof(*cs);
- *cs++ = MI_STORE_REGISTER_MEM_GEN8;
- *cs++ = CS_GPR(engine, 2 * i);
- *cs++ = lower_32_bits(addr);
- *cs++ = upper_32_bits(addr);
-
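-		/*
-		 * Gate each increment on the global semaphore: wait until
-		 * preempt_user() has bumped result[0] to at least i, so the
-		 * preempting requests release the steps one at a time.
-		 */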
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_GTE_SDD;
- *cs++ = i;
- *cs++ = lower_32_bits(result->node.start);
- *cs++ = upper_32_bits(result->node.start);
- }
-
- *cs++ = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- return vma;
-}
-
-static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int err;
-
- obj = i915_gem_object_create_internal(gt->i915, sz);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_ggtt_pin(vma, NULL, 0, 0);
- if (err) {
- i915_vma_put(vma);
- return ERR_PTR(err);
- }
-
- return vma;
-}
-
-static struct i915_request *
-create_gpr_client(struct intel_engine_cs *engine,
- struct i915_vma *global,
- unsigned int offset)
-{
- struct i915_vma *batch, *vma;
- struct intel_context *ce;
- struct i915_request *rq;
- int err;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- vma = i915_vma_instance(global->obj, ce->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_ce;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto out_ce;
-
- batch = create_gpr_user(engine, vma, offset);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto out_vma;
- }
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_batch;
- }
-
- i915_vma_lock(vma);
- err = i915_request_await_object(rq, vma->obj, false);
- if (!err)
- err = i915_vma_move_to_active(vma, rq, 0);
- i915_vma_unlock(vma);
-
- i915_vma_lock(batch);
- if (!err)
- err = i915_request_await_object(rq, batch->obj, false);
- if (!err)
- err = i915_vma_move_to_active(batch, rq, 0);
- if (!err)
- err = rq->engine->emit_bb_start(rq,
- batch->node.start,
- PAGE_SIZE, 0);
- i915_vma_unlock(batch);
- i915_vma_unpin(batch);
-
- if (!err)
- i915_request_get(rq);
- i915_request_add(rq);
-
-out_batch:
- i915_vma_put(batch);
-out_vma:
- i915_vma_unpin(vma);
-out_ce:
- intel_context_put(ce);
- return err ? ERR_PTR(err) : rq;
-}
-
-static int preempt_user(struct intel_engine_cs *engine,
- struct i915_vma *global,
- int id)
-{
- struct i915_sched_attr attr = {
- .priority = I915_PRIORITY_MAX
- };
- struct i915_request *rq;
- int err = 0;
- u32 *cs;
-
- rq = intel_engine_create_kernel_request(engine);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
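-	/*
-	 * Write the pass number into dword 0 of the global buffer, releasing
-	 * the next MI_SEMAPHORE_WAIT step in every client batch.
-	 */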
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(global);
- *cs++ = 0;
- *cs++ = id;
-
- intel_ring_advance(rq, cs);
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- engine->schedule(rq, &attr);
-
- if (i915_request_wait(rq, 0, HZ / 2) < 0)
- err = -ETIME;
- i915_request_put(rq);
-
- return err;
-}
-
-static int live_preempt_user(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct i915_vma *global;
- enum intel_engine_id id;
- u32 *result;
- int err = 0;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- /*
- * In our other tests, we look at preemption in carefully
- * controlled conditions in the ringbuffer. Since most of the
- * time is spent in user batches, most of our preemptions naturally
- * occur there. We want to verify that when we preempt inside a batch
- * we continue on from the current instruction and do not roll back
- * to the start, or another earlier arbitration point.
- *
- * To verify this, we create a batch which is a mixture of
- * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with
- * a few preempting contexts thrown into the mix, we look for any
- * repeated instructions (which show up as incorrect values).
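-	 *
-	 * result[] layout: dword 0 is the global semaphore bumped by
-	 * preempt_user(); client i then owns the NUM_GPR dwords starting at
-	 * NUM_GPR * i, one per GPR. Any stored value other than 1 indicates
-	 * an increment was replayed after a preemption.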
- */
-
- global = create_global(gt, 4096);
- if (IS_ERR(global))
- return PTR_ERR(global);
-
- result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
- if (IS_ERR(result)) {
- i915_vma_unpin_and_release(&global, 0);
- return PTR_ERR(result);
- }
-
- for_each_engine(engine, gt, id) {
- struct i915_request *client[3] = {};
- struct igt_live_test t;
- int i;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
- continue; /* we need per-context GPR */
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
-
- memset(result, 0, 4096);
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- struct i915_request *rq;
-
- rq = create_gpr_client(engine, global,
- NUM_GPR * i * sizeof(u32));
-			if (IS_ERR(rq)) {
-				err = PTR_ERR(rq);
-				goto end_test;
-			}
-
- client[i] = rq;
- }
-
- /* Continuously preempt the set of 3 running contexts */
- for (i = 1; i <= NUM_GPR; i++) {
- err = preempt_user(engine, global, i);
- if (err)
- goto end_test;
- }
-
- if (READ_ONCE(result[0]) != NUM_GPR) {
- pr_err("%s: Failed to release semaphore\n",
- engine->name);
- err = -EIO;
- goto end_test;
- }
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- int gpr;
-
- if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
- err = -ETIME;
- goto end_test;
- }
-
- for (gpr = 1; gpr < NUM_GPR; gpr++) {
- if (result[NUM_GPR * i + gpr] != 1) {
- pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
- engine->name,
- i, gpr, result[NUM_GPR * i + gpr]);
- err = -EINVAL;
- goto end_test;
- }
- }
- }
-
-end_test:
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- if (!client[i])
- break;
-
- i915_request_put(client[i]);
- }
-
- /* Flush the semaphores on error */
- smp_store_mb(result[0], -1);
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- break;
- }
-
- i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
- return err;
-}
-
-static int live_preempt_timeout(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct igt_spinner spin_lo;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Check that we force preemption to occur by cancelling the previous
- * context if it refuses to yield the GPU.
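-	 *
-	 * With CONFIG_DRM_I915_PREEMPT_TIMEOUT, an engine that does not ack a
-	 * preemption request within the timeout is reset; dropping the
-	 * timeout to 1ms around a non-preemptible spinner means the high
-	 * priority request can only complete via that forced reset.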
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
- return 0;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- if (igt_spinner_init(&spin_lo, gt))
- return -ENOMEM;
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- goto err_spin_lo;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
- for_each_engine(engine, gt, id) {
- unsigned long saved_timeout;
- struct i915_request *rq;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_NOOP); /* preemption disabled */
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_lo, rq)) {
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = igt_request_alloc(ctx_hi, engine);
- if (IS_ERR(rq)) {
- igt_spinner_end(&spin_lo);
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- /* Flush the previous CS ack before changing timeouts */
- while (READ_ONCE(engine->execlists.pending[0]))
- cpu_relax();
-
- saved_timeout = engine->props.preempt_timeout_ms;
- engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- intel_engine_flush_submission(engine);
- engine->props.preempt_timeout_ms = saved_timeout;
-
- if (i915_request_wait(rq, 0, HZ / 10) < 0) {
- intel_gt_set_wedged(gt);
- i915_request_put(rq);
- err = -ETIME;
- goto err_ctx_lo;
- }
-
- igt_spinner_end(&spin_lo);
- i915_request_put(rq);
- }
-
- err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
- return err;
-}
-
-static int random_range(struct rnd_state *rnd, int min, int max)
-{
- return i915_prandom_u32_max_state(max - min, rnd) + min;
-}
-
-static int random_priority(struct rnd_state *rnd)
-{
- return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
-}
-
-struct preempt_smoke {
- struct intel_gt *gt;
- struct i915_gem_context **contexts;
- struct intel_engine_cs *engine;
- struct drm_i915_gem_object *batch;
- unsigned int ncontext;
- struct rnd_state prng;
- unsigned long count;
-};
-
-static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
-{
- return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
- &smoke->prng)];
-}
-
-static int smoke_submit(struct preempt_smoke *smoke,
- struct i915_gem_context *ctx, int prio,
- struct drm_i915_gem_object *batch)
-{
- struct i915_request *rq;
- struct i915_vma *vma = NULL;
- int err = 0;
-
- if (batch) {
- struct i915_address_space *vm;
-
- vm = i915_gem_context_get_vm_rcu(ctx);
- vma = i915_vma_instance(batch, vm, NULL);
- i915_vm_put(vm);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
- }
-
- ctx->sched.priority = prio;
-
- rq = igt_request_alloc(ctx, smoke->engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto unpin;
- }
-
- if (vma) {
- i915_vma_lock(vma);
- err = i915_request_await_object(rq, vma->obj, false);
- if (!err)
- err = i915_vma_move_to_active(vma, rq, 0);
- if (!err)
- err = rq->engine->emit_bb_start(rq,
- vma->node.start,
- PAGE_SIZE, 0);
- i915_vma_unlock(vma);
- }
-
- i915_request_add(rq);
-
-unpin:
- if (vma)
- i915_vma_unpin(vma);
-
- return err;
-}
-
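-/*
- * "Crescendo": each per-engine thread keeps submitting with a monotonically
- * rising (and wrapping) priority, so newer requests continually preempt the
- * older ones on that engine.
- */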
-static int smoke_crescendo_thread(void *arg)
-{
- struct preempt_smoke *smoke = arg;
- IGT_TIMEOUT(end_time);
- unsigned long count;
-
- count = 0;
- do {
- struct i915_gem_context *ctx = smoke_context(smoke);
- int err;
-
- err = smoke_submit(smoke,
- ctx, count % I915_PRIORITY_MAX,
- smoke->batch);
- if (err)
- return err;
-
- count++;
- } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
-
- smoke->count = count;
- return 0;
-}
-
-static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
-#define BATCH BIT(0)
-{
- struct task_struct *tsk[I915_NUM_ENGINES] = {};
- struct preempt_smoke arg[I915_NUM_ENGINES];
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- unsigned long count;
- int err = 0;
-
- for_each_engine(engine, smoke->gt, id) {
- arg[id] = *smoke;
- arg[id].engine = engine;
- if (!(flags & BATCH))
- arg[id].batch = NULL;
- arg[id].count = 0;
-
-		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
- "igt/smoke:%d", id);
- if (IS_ERR(tsk[id])) {
- err = PTR_ERR(tsk[id]);
- break;
- }
- get_task_struct(tsk[id]);
- }
-
- yield(); /* start all threads before we kthread_stop() */
-
- count = 0;
- for_each_engine(engine, smoke->gt, id) {
- int status;
-
- if (IS_ERR_OR_NULL(tsk[id]))
- continue;
-
- status = kthread_stop(tsk[id]);
- if (status && !err)
- err = status;
-
- count += arg[id].count;
-
- put_task_struct(tsk[id]);
- }
-
- pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
- count, flags, smoke->gt->info.num_engines, smoke->ncontext);
-	return err;
-}
-
-static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
-{
- enum intel_engine_id id;
- IGT_TIMEOUT(end_time);
- unsigned long count;
-
- count = 0;
- do {
- for_each_engine(smoke->engine, smoke->gt, id) {
- struct i915_gem_context *ctx = smoke_context(smoke);
- int err;
-
- err = smoke_submit(smoke,
- ctx, random_priority(&smoke->prng),
- flags & BATCH ? smoke->batch : NULL);
- if (err)
- return err;
-
- count++;
- }
- } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
-
- pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
- count, flags, smoke->gt->info.num_engines, smoke->ncontext);
- return 0;
-}
-
-static int live_preempt_smoke(void *arg)
-{
- struct preempt_smoke smoke = {
- .gt = arg,
- .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
- .ncontext = 256,
- };
- const unsigned int phase[] = { 0, BATCH };
- struct igt_live_test t;
- int err = -ENOMEM;
- u32 *cs;
- int n;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(smoke.gt->i915))
- return 0;
-
- smoke.contexts = kmalloc_array(smoke.ncontext,
- sizeof(*smoke.contexts),
- GFP_KERNEL);
- if (!smoke.contexts)
- return -ENOMEM;
-
- smoke.batch =
- i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
- if (IS_ERR(smoke.batch)) {
- err = PTR_ERR(smoke.batch);
- goto err_free;
- }
-
- cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto err_batch;
- }
- for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
- cs[n] = MI_ARB_CHECK;
- cs[n] = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(smoke.batch);
- i915_gem_object_unpin_map(smoke.batch);
-
- if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
- err = -EIO;
- goto err_batch;
- }
-
- for (n = 0; n < smoke.ncontext; n++) {
- smoke.contexts[n] = kernel_context(smoke.gt->i915);
- if (!smoke.contexts[n])
- goto err_ctx;
- }
-
- for (n = 0; n < ARRAY_SIZE(phase); n++) {
- err = smoke_crescendo(&smoke, phase[n]);
- if (err)
- goto err_ctx;
-
- err = smoke_random(&smoke, phase[n]);
- if (err)
- goto err_ctx;
- }
-
-err_ctx:
- if (igt_live_test_end(&t))
- err = -EIO;
-
- for (n = 0; n < smoke.ncontext; n++) {
- if (!smoke.contexts[n])
- break;
- kernel_context_close(smoke.contexts[n]);
- }
-
-err_batch:
- i915_gem_object_put(smoke.batch);
-err_free:
- kfree(smoke.contexts);
-
- return err;
-}
-
-static int nop_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling,
- unsigned int nctx,
- unsigned int flags)
-#define CHAIN BIT(0)
-{
- IGT_TIMEOUT(end_time);
- struct i915_request *request[16] = {};
- struct intel_context *ve[16];
- unsigned long n, prime, nc;
- struct igt_live_test t;
- ktime_t times[2] = {};
- int err;
-
- GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
-
- for (n = 0; n < nctx; n++) {
- ve[n] = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve[n])) {
- err = PTR_ERR(ve[n]);
- nctx = n;
- goto out;
- }
-
- err = intel_context_pin(ve[n]);
- if (err) {
- intel_context_put(ve[n]);
- nctx = n;
- goto out;
- }
- }
-
- err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
- if (err)
- goto out;
-
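-	/*
-	 * Submit 'prime' empty requests on each virtual engine, either all
-	 * of one context before the next (CHAIN) or interleaved across the
-	 * contexts, and measure how long the whole batch takes to retire.
-	 */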
- for_each_prime_number_from(prime, 1, 8192) {
- times[1] = ktime_get_raw();
-
- if (flags & CHAIN) {
- for (nc = 0; nc < nctx; nc++) {
- for (n = 0; n < prime; n++) {
- struct i915_request *rq;
-
- rq = i915_request_create(ve[nc]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- if (request[nc])
- i915_request_put(request[nc]);
- request[nc] = i915_request_get(rq);
- i915_request_add(rq);
- }
- }
- } else {
- for (n = 0; n < prime; n++) {
- for (nc = 0; nc < nctx; nc++) {
- struct i915_request *rq;
-
- rq = i915_request_create(ve[nc]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- if (request[nc])
- i915_request_put(request[nc]);
- request[nc] = i915_request_get(rq);
- i915_request_add(rq);
- }
- }
- }
-
- for (nc = 0; nc < nctx; nc++) {
- if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
- pr_err("%s(%s): wait for %llx:%lld timed out\n",
- __func__, ve[0]->engine->name,
- request[nc]->fence.context,
- request[nc]->fence.seqno);
-
- GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
- __func__, ve[0]->engine->name,
- request[nc]->fence.context,
- request[nc]->fence.seqno);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- break;
- }
- }
-
- times[1] = ktime_sub(ktime_get_raw(), times[1]);
- if (prime == 1)
- times[0] = times[1];
-
- for (nc = 0; nc < nctx; nc++) {
- i915_request_put(request[nc]);
- request[nc] = NULL;
- }
-
- if (__igt_timeout(end_time, NULL))
- break;
- }
-
- err = igt_live_test_end(&t);
- if (err)
- goto out;
-
-	pr_info("Request x%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
- nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
- prime, div64_u64(ktime_to_ns(times[1]), prime));
-
-out:
- if (igt_flush_test(gt->i915))
- err = -EIO;
-
- for (nc = 0; nc < nctx; nc++) {
- i915_request_put(request[nc]);
- intel_context_unpin(ve[nc]);
- intel_context_put(ve[nc]);
- }
- return err;
-}
-
-static unsigned int
-__select_siblings(struct intel_gt *gt,
- unsigned int class,
- struct intel_engine_cs **siblings,
- bool (*filter)(const struct intel_engine_cs *))
-{
- unsigned int n = 0;
- unsigned int inst;
-
- for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!gt->engine_class[class][inst])
- continue;
-
- if (filter && !filter(gt->engine_class[class][inst]))
- continue;
-
- siblings[n++] = gt->engine_class[class][inst];
- }
-
- return n;
-}
-
-static unsigned int
-select_siblings(struct intel_gt *gt,
- unsigned int class,
- struct intel_engine_cs **siblings)
-{
- return __select_siblings(gt, class, siblings, NULL);
-}
-
-static int live_virtual_engine(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for_each_engine(engine, gt, id) {
- err = nop_virtual_engine(gt, &engine, 1, 1, 0);
- if (err) {
- pr_err("Failed to wrap engine %s: err=%d\n",
- engine->name, err);
- return err;
- }
- }
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- int nsibling, n;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- for (n = 1; n <= nsibling + 1; n++) {
- err = nop_virtual_engine(gt, siblings, nsibling,
- n, 0);
- if (err)
- return err;
- }
-
- err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int mask_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
- struct intel_context *ve;
- struct igt_live_test t;
- unsigned int n;
- int err;
-
- /*
- * Check that by setting the execution mask on a request, we can
- * restrict it to our desired engine within the virtual engine.
- */
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- goto out_close;
- }
-
- err = intel_context_pin(ve);
- if (err)
- goto out_put;
-
- err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
- if (err)
- goto out_unpin;
-
- for (n = 0; n < nsibling; n++) {
- request[n] = i915_request_create(ve);
- if (IS_ERR(request[n])) {
- err = PTR_ERR(request[n]);
- nsibling = n;
- goto out;
- }
-
- /* Reverse order as it's more likely to be unnatural */
- request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
-
- i915_request_get(request[n]);
- i915_request_add(request[n]);
- }
-
- for (n = 0; n < nsibling; n++) {
- if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
- pr_err("%s(%s): wait for %llx:%lld timed out\n",
- __func__, ve->engine->name,
- request[n]->fence.context,
- request[n]->fence.seqno);
-
- GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
- __func__, ve->engine->name,
- request[n]->fence.context,
- request[n]->fence.seqno);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto out;
- }
-
- if (request[n]->engine != siblings[nsibling - n - 1]) {
- pr_err("Executed on wrong sibling '%s', expected '%s'\n",
- request[n]->engine->name,
- siblings[nsibling - n - 1]->name);
- err = -EINVAL;
- goto out;
- }
- }
-
- err = igt_live_test_end(&t);
-out:
- if (igt_flush_test(gt->i915))
- err = -EIO;
-
- for (n = 0; n < nsibling; n++)
- i915_request_put(request[n]);
-
-out_unpin:
- intel_context_unpin(ve);
-out_put:
- intel_context_put(ve);
-out_close:
- return err;
-}
-
-static int live_virtual_mask(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- unsigned int nsibling;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- err = mask_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int slicein_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- const long timeout = slice_timeout(siblings[0]);
- struct intel_context *ce;
- struct i915_request *rq;
- struct igt_spinner spin;
- unsigned int n;
- int err = 0;
-
- /*
- * Virtual requests must take part in timeslicing on the target engines.
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for (n = 0; n < nsibling; n++) {
- ce = intel_context_create(siblings[n]);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_add(rq);
- }
-
- ce = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_get(rq);
- i915_request_add(rq);
- if (i915_request_wait(rq, 0, timeout) < 0) {
- GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
- __func__, rq->engine->name);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- }
- i915_request_put(rq);
-
-out:
- igt_spinner_end(&spin);
- if (igt_flush_test(gt->i915))
- err = -EIO;
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int sliceout_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- const long timeout = slice_timeout(siblings[0]);
- struct intel_context *ce;
- struct i915_request *rq;
- struct igt_spinner spin;
- unsigned int n;
- int err = 0;
-
- /*
- * Virtual requests must allow others a fair timeslice.
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- /* XXX We do not handle oversubscription and fairness with normal rq */
- for (n = 0; n < nsibling; n++) {
- ce = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_add(rq);
- }
-
- for (n = 0; !err && n < nsibling; n++) {
- ce = intel_context_create(siblings[n]);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_get(rq);
- i915_request_add(rq);
- if (i915_request_wait(rq, 0, timeout) < 0) {
- GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
- __func__, siblings[n]->name);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- }
- i915_request_put(rq);
- }
-
-out:
- igt_spinner_end(&spin);
- if (igt_flush_test(gt->i915))
- err = -EIO;
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_virtual_slice(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- unsigned int nsibling;
-
- nsibling = __select_siblings(gt, class, siblings,
- intel_engine_has_timeslices);
- if (nsibling < 2)
- continue;
-
- err = slicein_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
-
- err = sliceout_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int preserved_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- struct i915_request *last = NULL;
- struct intel_context *ve;
- struct i915_vma *scratch;
- struct igt_live_test t;
- unsigned int n;
- int err = 0;
- u32 *cs;
-
- scratch = create_scratch(siblings[0]->gt);
- if (IS_ERR(scratch))
- return PTR_ERR(scratch);
-
- err = i915_vma_sync(scratch);
- if (err)
- goto out_scratch;
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- goto out_scratch;
- }
-
- err = intel_context_pin(ve);
- if (err)
- goto out_put;
-
- err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
- if (err)
- goto out_unpin;
-
- for (n = 0; n < NUM_GPR_DW; n++) {
- struct intel_engine_cs *engine = siblings[n % nsibling];
- struct i915_request *rq;
-
- rq = i915_request_create(ve);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_end;
- }
-
- i915_request_put(last);
- last = i915_request_get(rq);
-
- cs = intel_ring_begin(rq, 8);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- err = PTR_ERR(cs);
- goto out_end;
- }
-
- *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
- *cs++ = CS_GPR(engine, n);
- *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
- *cs++ = 0;
-
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
- *cs++ = n + 1;
-
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- /* Restrict this request to run on a particular engine */
- rq->execution_mask = engine->mask;
- i915_request_add(rq);
- }
-
- if (i915_request_wait(last, 0, HZ / 5) < 0) {
- err = -ETIME;
- goto out_end;
- }
-
- cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto out_end;
- }
-
- for (n = 0; n < NUM_GPR_DW; n++) {
- if (cs[n] != n) {
- pr_err("Incorrect value[%d] found for GPR[%d]\n",
- cs[n], n);
- err = -EINVAL;
- break;
- }
- }
-
- i915_gem_object_unpin_map(scratch->obj);
-
-out_end:
- if (igt_live_test_end(&t))
- err = -EIO;
- i915_request_put(last);
-out_unpin:
- intel_context_unpin(ve);
-out_put:
- intel_context_put(ve);
-out_scratch:
- i915_vma_unpin_and_release(&scratch, 0);
- return err;
-}
-
-static int live_virtual_preserved(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
-
- /*
- * Check that the context image retains non-privileged (user) registers
- * from one engine to the next. For this we check that the CS_GPR
- * are preserved.
- */
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
-	/* As we use CS_GPR, we cannot run on platforms before they existed on all engines. */
- if (INTEL_GEN(gt->i915) < 9)
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- int nsibling, err;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- err = preserved_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
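The chain emitted by preserved_virtual_engine() stores GPR[n] to scratch[n] and then loads n + 1 into GPR[(n + 1) % NUM_GPR_DW], so scratch[n] can only equal n if the register file is carried intact from one request (and sibling) to the next. A standalone simulation of that arithmetic, abstracting the engines away (NUM_GPR_DW is taken as 16 purely for illustration):

#include <stdio.h>

#define NUM_GPR_DW 16

int main(void)
{
	unsigned int gpr[NUM_GPR_DW] = { 0 };	/* context image starts zeroed */
	unsigned int scratch[NUM_GPR_DW];

	for (unsigned int n = 0; n < NUM_GPR_DW; n++) {
		scratch[n] = gpr[n];			/* MI_STORE_REGISTER_MEM */
		gpr[(n + 1) % NUM_GPR_DW] = n + 1;	/* MI_LOAD_REGISTER_IMM */
	}

	for (unsigned int n = 0; n < NUM_GPR_DW; n++)
		if (scratch[n] != n)
			printf("mismatch at %u\n", n);	/* never hit if preserved */
	return 0;
}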
-
-static int bond_virtual_engine(struct intel_gt *gt,
- unsigned int class,
- struct intel_engine_cs **siblings,
- unsigned int nsibling,
- unsigned int flags)
-#define BOND_SCHEDULE BIT(0)
-{
- struct intel_engine_cs *master;
- struct i915_request *rq[16];
- enum intel_engine_id id;
- struct igt_spinner spin;
- unsigned long n;
- int err;
-
- /*
- * A set of bonded requests is intended to be run concurrently
- * across a number of engines. We use one request per-engine
- * and a magic fence to schedule each of the bonded requests
- * at the same time. A consequence of our current scheduler is that
- * we only move requests to the HW ready queue when the request
- * becomes ready, that is when all of its prerequisite fences have
- * been signaled. As one of those fences is the master submit fence,
- * there is a delay on all secondary fences as the HW may be
- * currently busy. Equally, as all the requests are independent,
- * they may have other fences that delay individual request
- * submission to HW. Ergo, we do not guarantee that all requests are
- * immediately submitted to HW at the same time, just that if the
- * rules are abided by, they are ready at the same time as the
- * first is submitted. Userspace can embed semaphores in its batch
- * to ensure parallel execution of its phases as it requires.
-	 * Though naturally it has been suggested that the scheduler should
- * take care of parallel execution, even across preemption events on
- * different HW. (The proper answer is of course "lalalala".)
- *
- * With the submit-fence, we have identified three possible phases
- * of synchronisation depending on the master fence: queued (not
- * ready), executing, and signaled. The first two are quite simple
- * and checked below. However, the signaled master fence handling is
- * contentious. Currently we do not distinguish between a signaled
- * fence and an expired fence, as once signaled it does not convey
- * any information about the previous execution. It may even be freed
-	 * and hence, by the time we check, it may not exist at all. Ergo we currently
- * do not apply the bonding constraint for an already signaled fence,
- * as our expectation is that it should not constrain the secondaries
- * and is outside of the scope of the bonded request API (i.e. all
- * userspace requests are meant to be running in parallel). As
- * it imposes no constraint, and is effectively a no-op, we do not
- * check below as normal execution flows are checked extensively above.
- *
- * XXX Is the degenerate handling of signaled submit fences the
-	 * expected behaviour for userspace?
- */
-
- GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- err = 0;
- rq[0] = ERR_PTR(-ENOMEM);
- for_each_engine(master, gt, id) {
- struct i915_sw_fence fence = {};
- struct intel_context *ce;
-
- if (master->class == class)
- continue;
-
- ce = intel_context_create(master);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
-
- rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
- intel_context_put(ce);
- if (IS_ERR(rq[0])) {
- err = PTR_ERR(rq[0]);
- goto out;
- }
- i915_request_get(rq[0]);
-
- if (flags & BOND_SCHEDULE) {
- onstack_fence_init(&fence);
- err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
- &fence,
- GFP_KERNEL);
- }
-
- i915_request_add(rq[0]);
- if (err < 0)
- goto out;
-
- if (!(flags & BOND_SCHEDULE) &&
- !igt_wait_for_spinner(&spin, rq[0])) {
- err = -EIO;
- goto out;
- }
-
- for (n = 0; n < nsibling; n++) {
- struct intel_context *ve;
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- onstack_fence_fini(&fence);
- goto out;
- }
-
- err = intel_virtual_engine_attach_bond(ve->engine,
- master,
- siblings[n]);
- if (err) {
- intel_context_put(ve);
- onstack_fence_fini(&fence);
- goto out;
- }
-
- err = intel_context_pin(ve);
- intel_context_put(ve);
- if (err) {
- onstack_fence_fini(&fence);
- goto out;
- }
-
- rq[n + 1] = i915_request_create(ve);
- intel_context_unpin(ve);
- if (IS_ERR(rq[n + 1])) {
- err = PTR_ERR(rq[n + 1]);
- onstack_fence_fini(&fence);
- goto out;
- }
- i915_request_get(rq[n + 1]);
-
- err = i915_request_await_execution(rq[n + 1],
- &rq[0]->fence,
- ve->engine->bond_execute);
- i915_request_add(rq[n + 1]);
- if (err < 0) {
- onstack_fence_fini(&fence);
- goto out;
- }
- }
- onstack_fence_fini(&fence);
- intel_engine_flush_submission(master);
- igt_spinner_end(&spin);
-
- if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
- pr_err("Master request did not execute (on %s)!\n",
- rq[0]->engine->name);
- err = -EIO;
- goto out;
- }
-
- for (n = 0; n < nsibling; n++) {
- if (i915_request_wait(rq[n + 1], 0,
- MAX_SCHEDULE_TIMEOUT) < 0) {
- err = -EIO;
- goto out;
- }
-
- if (rq[n + 1]->engine != siblings[n]) {
- pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
- siblings[n]->name,
- rq[n + 1]->engine->name,
- rq[0]->engine->name);
- err = -EINVAL;
- goto out;
- }
- }
-
- for (n = 0; !IS_ERR(rq[n]); n++)
- i915_request_put(rq[n]);
- rq[0] = ERR_PTR(-ENOMEM);
- }
-
-out:
- for (n = 0; !IS_ERR(rq[n]); n++)
- i915_request_put(rq[n]);
- if (igt_flush_test(gt->i915))
- err = -EIO;
-
- igt_spinner_fini(&spin);
- return err;
-}
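The comment in bond_virtual_engine() above distinguishes three phases of the master submit fence and argues that an already-signaled fence should impose no bonding constraint, since it no longer tells us where the master ran. A trivial standalone restatement of that rule (the names below are invented for illustration):

#include <stdio.h>

enum master_phase { MASTER_QUEUED, MASTER_EXECUTING, MASTER_SIGNALED };

/*
 * The bond only carries information while the master is still queued or
 * executing; once its fence has signaled, the secondaries are left
 * unconstrained.
 */
static int bond_constrains(enum master_phase phase)
{
	return phase != MASTER_SIGNALED;
}

int main(void)
{
	printf("queued: %d, executing: %d, signaled: %d\n",
	       bond_constrains(MASTER_QUEUED),
	       bond_constrains(MASTER_EXECUTING),
	       bond_constrains(MASTER_SIGNALED));
	return 0;
}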
-
-static int live_virtual_bond(void *arg)
-{
- static const struct phase {
- const char *name;
- unsigned int flags;
- } phases[] = {
- { "", 0 },
- { "schedule", BOND_SCHEDULE },
- { },
- };
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- const struct phase *p;
- int nsibling;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- for (p = phases; p->name; p++) {
- err = bond_virtual_engine(gt,
- class, siblings, nsibling,
- p->flags);
- if (err) {
- pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
- __func__, p->name, class, nsibling, err);
- return err;
- }
- }
- }
-
- return 0;
-}
-
-static int reset_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- struct intel_engine_cs *engine;
- struct intel_context *ve;
- struct igt_spinner spin;
- struct i915_request *rq;
- unsigned int n;
- int err = 0;
-
- /*
- * In order to support offline error capture for fast preempt reset,
- * we need to decouple the guilty request and ensure that it and its
-	 * descendants are not executed while the capture is in progress.
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- goto out_spin;
- }
-
- for (n = 0; n < nsibling; n++)
- st_engine_heartbeat_disable(siblings[n]);
-
- rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_heartbeat;
- }
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- intel_gt_set_wedged(gt);
- err = -ETIME;
- goto out_heartbeat;
- }
-
- engine = rq->engine;
- GEM_BUG_ON(engine == ve->engine);
-
- /* Take ownership of the reset and tasklet */
- if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &gt->reset.flags)) {
- intel_gt_set_wedged(gt);
- err = -EBUSY;
- goto out_heartbeat;
- }
- tasklet_disable(&engine->execlists.tasklet);
-
- engine->execlists.tasklet.func(engine->execlists.tasklet.data);
- GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
- /* Fake a preemption event; failed of course */
- spin_lock_irq(&engine->active.lock);
- __unwind_incomplete_requests(engine);
- spin_unlock_irq(&engine->active.lock);
- GEM_BUG_ON(rq->engine != ve->engine);
-
- /* Reset the engine while keeping our active request on hold */
- execlists_hold(engine, rq);
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- intel_engine_reset(engine, NULL);
- GEM_BUG_ON(rq->fence.error != -EIO);
-
- /* Release our grasp on the engine, letting CS flow again */
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
-
- /* Check that we do not resubmit the held request */
- i915_request_get(rq);
- if (!i915_request_wait(rq, 0, HZ / 5)) {
- pr_err("%s: on hold request completed!\n",
- engine->name);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto out_rq;
- }
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- /* But is resubmitted on release */
- execlists_unhold(engine, rq);
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- pr_err("%s: held request did not complete!\n",
- engine->name);
- intel_gt_set_wedged(gt);
- err = -ETIME;
- }
-
-out_rq:
- i915_request_put(rq);
-out_heartbeat:
- for (n = 0; n < nsibling; n++)
- st_engine_heartbeat_enable(siblings[n]);
-
- intel_context_put(ve);
-out_spin:
- igt_spinner_fini(&spin);
- return err;
-}
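reset_virtual_engine() exercises execlists_hold()/execlists_unhold(): the guilty request is parked while the error capture runs and only becomes eligible for resubmission on release. A toy standalone sketch of that two-state lifecycle (purely illustrative; no i915 structures are used):

#include <stdbool.h>
#include <stdio.h>

struct toy_request {
	bool on_hold;
	bool submitted;
};

/* Withhold the guilty request from the hardware while capture runs. */
static void hold_request(struct toy_request *rq)
{
	rq->on_hold = true;
	rq->submitted = false;
}

/* Once capture is done, release the request so it can be resubmitted. */
static void unhold_request(struct toy_request *rq)
{
	rq->on_hold = false;
	rq->submitted = true;
}

int main(void)
{
	struct toy_request rq = { 0 };

	hold_request(&rq);
	printf("during capture: on_hold=%d submitted=%d\n", rq.on_hold, rq.submitted);
	unhold_request(&rq);
	printf("after release:  on_hold=%d submitted=%d\n", rq.on_hold, rq.submitted);
	return 0;
}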
-
-static int live_virtual_reset(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
-
- /*
- * Check that we handle a reset event within a virtual engine.
- * Only the physical engine is reset, but we have to check the flow
- * of the virtual requests around the reset, and make sure it is not
- * forgotten.
- */
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- int nsibling, err;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- err = reset_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-int intel_execlists_live_selftests(struct drm_i915_private *i915)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(live_sanitycheck),
- SUBTEST(live_unlite_switch),
- SUBTEST(live_unlite_preempt),
- SUBTEST(live_unlite_ring),
- SUBTEST(live_pin_rewind),
- SUBTEST(live_hold_reset),
- SUBTEST(live_error_interrupt),
- SUBTEST(live_timeslice_preempt),
- SUBTEST(live_timeslice_rewind),
- SUBTEST(live_timeslice_queue),
- SUBTEST(live_timeslice_nopreempt),
- SUBTEST(live_busywait_preempt),
- SUBTEST(live_preempt),
- SUBTEST(live_late_preempt),
- SUBTEST(live_nopreempt),
- SUBTEST(live_preempt_cancel),
- SUBTEST(live_suppress_self_preempt),
- SUBTEST(live_chain_preempt),
- SUBTEST(live_preempt_ring),
- SUBTEST(live_preempt_gang),
- SUBTEST(live_preempt_timeout),
- SUBTEST(live_preempt_user),
- SUBTEST(live_preempt_smoke),
- SUBTEST(live_virtual_engine),
- SUBTEST(live_virtual_mask),
- SUBTEST(live_virtual_preserved),
- SUBTEST(live_virtual_slice),
- SUBTEST(live_virtual_bond),
- SUBTEST(live_virtual_reset),
- };
-
- if (!HAS_EXECLISTS(i915))
- return 0;
-
- if (intel_gt_is_wedged(&i915->gt))
- return 0;
-
- return intel_gt_live_subtests(tests, &i915->gt);
-}
-
static int emit_semaphore_signal(struct intel_context *ce, void *slot)
{
const u32 offset =
@@ -4775,9 +139,10 @@ static int live_lrc_layout(void *arg)
* match the layout saved by HW.
*/
- lrc = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ lrc = (u32 *)__get_free_page(GFP_KERNEL); /* requires page alignment */
if (!lrc)
return -ENOMEM;
+ GEM_BUG_ON(offset_in_page(lrc));
err = 0;
for_each_engine(engine, gt, id) {
@@ -4794,15 +159,12 @@ static int live_lrc_layout(void *arg)
}
hw += LRC_STATE_OFFSET / sizeof(*hw);
- execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE),
- engine->kernel_context,
- engine,
- engine->kernel_context->ring,
- true);
+ __lrc_init_regs(memset(lrc, POISON_INUSE, PAGE_SIZE),
+ engine->kernel_context, engine, true);
dw = 0;
do {
- u32 lri = hw[dw];
+ u32 lri = READ_ONCE(hw[dw]);
if (lri == 0) {
dw++;
@@ -4835,9 +197,11 @@ static int live_lrc_layout(void *arg)
dw++;
while (lri) {
- if (hw[dw] != lrc[dw]) {
+ u32 offset = READ_ONCE(hw[dw]);
+
+ if (offset != lrc[dw]) {
pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
- engine->name, dw, hw[dw], lrc[dw]);
+ engine->name, dw, offset, lrc[dw]);
err = -EINVAL;
break;
}
@@ -4849,7 +213,7 @@ static int live_lrc_layout(void *arg)
dw += 2;
lri -= 2;
}
- } while ((lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+ } while (!err && (lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
if (err) {
pr_info("%s: HW register image:\n", engine->name);
@@ -4864,7 +228,7 @@ static int live_lrc_layout(void *arg)
break;
}
- kfree(lrc);
+ free_page((unsigned long)lrc);
return err;
}
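The hunk above swaps kmalloc() for __get_free_page() because the register-layout comparison assumes the scratch buffer starts on a page boundary, and GEM_BUG_ON(offset_in_page(lrc)) documents that assumption. A hedged userspace analogue of the same check, assuming a 4 KiB page:

#include <stdlib.h>
#include <stdint.h>
#include <assert.h>

int main(void)
{
	const size_t page = 4096;	/* assumed page size */
	uint32_t *lrc = aligned_alloc(page, page);

	if (!lrc)
		return 1;
	/* equivalent of GEM_BUG_ON(offset_in_page(lrc)) */
	assert(((uintptr_t)lrc & (page - 1)) == 0);
	free(lrc);
	return 0;
}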
@@ -5249,6 +613,10 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine,
err = emit_semaphore_signal(engine->kernel_context, slot);
if (err)
goto err_rq;
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err)
+ goto err_rq;
} else {
slot[0] = 1;
wmb();
@@ -6242,16 +1610,17 @@ static void garbage_reset(struct intel_engine_cs *engine,
const unsigned int bit = I915_RESET_ENGINE + engine->id;
unsigned long *lock = &engine->gt->reset.flags;
- if (test_and_set_bit(bit, lock))
- return;
-
- tasklet_disable(&engine->execlists.tasklet);
+ local_bh_disable();
+ if (!test_and_set_bit(bit, lock)) {
+ tasklet_disable(&engine->execlists.tasklet);
- if (!rq->fence.error)
- intel_engine_reset(engine, NULL);
+ if (!rq->fence.error)
+ __intel_engine_reset_bh(engine, NULL);
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(bit, lock);
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+ }
+ local_bh_enable();
}
static struct i915_request *garbage(struct intel_context *ce,
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index b25eba50c88e..cf373c72359e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -5,6 +5,7 @@
*/
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "i915_selftest.h"
#include "gem/selftests/mock_context.h"
@@ -56,33 +57,6 @@ static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
return err;
}
-static struct i915_vma *create_scratch(struct intel_gt *gt)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int err;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err) {
- i915_gem_object_put(obj);
- return ERR_PTR(err);
- }
-
- return vma;
-}
-
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
struct drm_i915_mocs_table table;
@@ -101,7 +75,7 @@ static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
arg->mocs = table;
- arg->scratch = create_scratch(gt);
+ arg->scratch = __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
if (IS_ERR(arg->scratch))
return PTR_ERR(arg->scratch);
@@ -125,7 +99,7 @@ static void live_mocs_fini(struct live_mocs *arg)
static int read_regs(struct i915_request *rq,
u32 addr, unsigned int count,
- uint32_t *offset)
+ u32 *offset)
{
unsigned int i;
u32 *cs;
@@ -153,7 +127,7 @@ static int read_regs(struct i915_request *rq,
static int read_mocs_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table,
- uint32_t *offset)
+ u32 *offset)
{
u32 addr;
@@ -167,7 +141,7 @@ static int read_mocs_table(struct i915_request *rq,
static int read_l3cc_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table,
- uint32_t *offset)
+ u32 *offset)
{
u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
@@ -176,7 +150,7 @@ static int read_l3cc_table(struct i915_request *rq,
static int check_mocs_table(struct intel_engine_cs *engine,
const struct drm_i915_mocs_table *table,
- uint32_t **vaddr)
+ u32 **vaddr)
{
unsigned int i;
u32 expect;
@@ -205,7 +179,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
static int check_l3cc_table(struct intel_engine_cs *engine,
const struct drm_i915_mocs_table *table,
- uint32_t **vaddr)
+ u32 **vaddr)
{
/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
@@ -361,29 +335,34 @@ static int active_engine_reset(struct intel_context *ce,
static int __live_mocs_reset(struct live_mocs *mocs,
struct intel_context *ce)
{
+ struct intel_gt *gt = ce->engine->gt;
int err;
- err = intel_engine_reset(ce->engine, "mocs");
- if (err)
- return err;
+ if (intel_has_reset_engine(gt)) {
+ err = intel_engine_reset(ce->engine, "mocs");
+ if (err)
+ return err;
- err = check_mocs_engine(mocs, ce);
- if (err)
- return err;
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
- err = active_engine_reset(ce, "mocs");
- if (err)
- return err;
+ err = active_engine_reset(ce, "mocs");
+ if (err)
+ return err;
- err = check_mocs_engine(mocs, ce);
- if (err)
- return err;
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+ }
- intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs");
+ if (intel_has_gpu_reset(gt)) {
+ intel_gt_reset(gt, ce->engine->mask, "mocs");
- err = check_mocs_engine(mocs, ce);
- if (err)
- return err;
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -398,9 +377,6 @@ static int live_mocs_reset(void *arg)
/* Check the mocs setup is retained over per-engine and global resets */
- if (!intel_has_reset_engine(gt))
- return 0;
-
err = live_mocs_init(&mocs, gt);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 64ef5ee5decf..61abc0556601 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -6,6 +6,7 @@
#include "intel_context.h"
#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
#include "selftest_rc6.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index ef5aeebbeeb0..8784257ec808 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -9,6 +9,7 @@
#include "i915_memcpy.h"
#include "i915_selftest.h"
+#include "intel_gpu_commands.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_spinner.h"
@@ -95,10 +96,10 @@ __igt_reset_stolen(struct intel_gt *gt,
if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
page << PAGE_SHIFT,
((page + 1) << PAGE_SHIFT) - 1))
- memset32(s, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+ memset_io(s, STACK_MAGIC, PAGE_SIZE);
- in = s;
- if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
+ in = (void __force *)s;
+ if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
in = tmp;
crc[page] = crc32_le(0, in, PAGE_SIZE);
@@ -133,8 +134,8 @@ __igt_reset_stolen(struct intel_gt *gt,
ggtt->error_capture.start,
PAGE_SIZE);
- in = s;
- if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
+ in = (void __force *)s;
+ if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
in = tmp;
x = crc32_le(0, in, PAGE_SIZE);
@@ -320,17 +321,25 @@ static int igt_atomic_engine_reset(void *arg)
goto out_unlock;
for_each_engine(engine, gt, id) {
- tasklet_disable(&engine->execlists.tasklet);
+ struct tasklet_struct *t = &engine->execlists.tasklet;
+
+ if (t->func)
+ tasklet_disable(t);
intel_engine_pm_get(engine);
for (p = igt_atomic_phases; p->name; p++) {
GEM_TRACE("intel_engine_reset(%s) under %s\n",
engine->name, p->name);
+ if (strcmp(p->name, "softirq"))
+ local_bh_disable();
p->critical_section_begin();
- err = intel_engine_reset(engine, NULL);
+ err = __intel_engine_reset_bh(engine, NULL);
p->critical_section_end();
+ if (strcmp(p->name, "softirq"))
+ local_bh_enable();
+
if (err) {
pr_err("intel_engine_reset(%s) failed under %s\n",
engine->name, p->name);
@@ -339,7 +348,10 @@ static int igt_atomic_engine_reset(void *arg)
}
intel_engine_pm_put(engine);
- tasklet_enable(&engine->execlists.tasklet);
+ if (t->func) {
+ tasklet_enable(t);
+ tasklet_hi_schedule(t);
+ }
if (err)
break;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index aa5675ecb5cc..967641fee42a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -185,7 +185,10 @@ static u8 rps_set_check(struct intel_rps *rps, u8 freq)
{
mutex_lock(&rps->lock);
GEM_BUG_ON(!intel_rps_is_active(rps));
- intel_rps_set(rps, freq);
+ if (wait_for(!intel_rps_set(rps, freq), 50)) {
+ mutex_unlock(&rps->lock);
+ return 0;
+ }
GEM_BUG_ON(rps->last_freq != freq);
mutex_unlock(&rps->lock);
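rps_set_check() now treats intel_rps_set() as something that may take time to settle, polling it under wait_for(..., 50) and bailing out gracefully on timeout instead of asserting. A hedged standalone sketch of that poll-with-timeout pattern (the helper below is an invention for illustration, not the kernel's wait_for() macro):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Poll a condition roughly once per millisecond until it holds or we give up. */
static bool wait_for_cond(bool (*cond)(void *), void *arg, int timeout_ms)
{
	struct timespec one_ms = { .tv_nsec = 1000 * 1000 };

	while (timeout_ms-- > 0) {
		if (cond(arg))
			return true;
		nanosleep(&one_ms, NULL);
	}
	return cond(arg);	/* one final check after the timeout */
}

static bool counter_done(void *arg)
{
	return --(*(int *)arg) <= 0;
}

int main(void)
{
	int countdown = 5;

	printf("condition met: %d\n", wait_for_cond(counter_done, &countdown, 50));
	return 0;
}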
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 2edf2b15885f..6f3a3687ef0f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -9,6 +9,7 @@
#include "intel_context.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
@@ -1090,12 +1091,6 @@ static int live_hwsp_read(void *arg)
}
count++;
- if (8 * watcher[1].rq->ring->emit >
- 3 * watcher[1].rq->ring->size) {
- i915_request_put(rq);
- break;
- }
-
/* Flush the timeline before manually wrapping again */
if (i915_request_wait(rq,
I915_WAIT_INTERRUPTIBLE,
@@ -1104,9 +1099,14 @@ static int live_hwsp_read(void *arg)
i915_request_put(rq);
goto out;
}
-
retire_requests(tl);
i915_request_put(rq);
+
+ /* Single requests are limited to half a ring at most */
+ if (8 * watcher[1].rq->ring->emit >
+ 3 * watcher[1].rq->ring->size)
+ break;
+
} while (!__igt_timeout(end_time, NULL));
WRITE_ONCE(*(u32 *)tl->hwsp_seqno, 0xdeadbeef);
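As a worked example of the relocated exit condition: assuming a 16 KiB ring, the loop stops wrapping the timeline once roughly 6 KiB has been emitted (8 * emit > 3 * size, i.e. emit exceeds 3/8 of the ring), which keeps each in-flight request comfortably under the half-ring limit mentioned in the new comment.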
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 61a0532d0f3d..2070b91cb607 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -95,8 +95,9 @@ reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
}
static struct drm_i915_gem_object *
-read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+read_nonprivs(struct intel_context *ce)
{
+ struct intel_engine_cs *engine = ce->engine;
const u32 base = engine->mmio_base;
struct drm_i915_gem_object *result;
struct i915_request *rq;
@@ -130,7 +131,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (err)
goto err_obj;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_pin;
@@ -145,7 +146,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
goto err_req;
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- if (INTEL_GEN(ctx->i915) >= 8)
+ if (INTEL_GEN(engine->i915) >= 8)
srm++;
cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
@@ -200,16 +201,16 @@ print_results(const struct intel_engine_cs *engine, const u32 *results)
}
}
-static int check_whitelist(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static int check_whitelist(struct intel_context *ce)
{
+ struct intel_engine_cs *engine = ce->engine;
struct drm_i915_gem_object *results;
struct intel_wedge_me wedge;
u32 *vaddr;
int err;
int i;
- results = read_nonprivs(ctx, engine);
+ results = read_nonprivs(ce);
if (IS_ERR(results))
return PTR_ERR(results);
@@ -293,8 +294,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
int (*reset)(struct intel_engine_cs *),
const char *name)
{
- struct drm_i915_private *i915 = engine->i915;
- struct i915_gem_context *ctx, *tmp;
+ struct intel_context *ce, *tmp;
struct igt_spinner spin;
intel_wakeref_t wakeref;
int err;
@@ -302,15 +302,15 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
engine->whitelist.count, engine->name, name);
- ctx = kernel_context(i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
err = igt_spinner_init(&spin, engine->gt);
if (err)
goto out_ctx;
- err = check_whitelist(ctx, engine);
+ err = check_whitelist(ce);
if (err) {
pr_err("Invalid whitelist *before* %s reset!\n", name);
goto out_spin;
@@ -330,22 +330,22 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
goto out_spin;
}
- err = check_whitelist(ctx, engine);
+ err = check_whitelist(ce);
if (err) {
pr_err("Whitelist not preserved in context across %s reset!\n",
name);
goto out_spin;
}
- tmp = kernel_context(i915);
+ tmp = intel_context_create(engine);
if (IS_ERR(tmp)) {
err = PTR_ERR(tmp);
goto out_spin;
}
- kernel_context_close(ctx);
- ctx = tmp;
+ intel_context_put(ce);
+ ce = tmp;
- err = check_whitelist(ctx, engine);
+ err = check_whitelist(ce);
if (err) {
pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
name);
@@ -355,7 +355,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
out_spin:
igt_spinner_fini(&spin);
out_ctx:
- kernel_context_close(ctx);
+ intel_context_put(ce);
return err;
}
@@ -486,10 +486,11 @@ static int check_dirty_whitelist(struct intel_context *ce)
struct intel_engine_cs *engine = ce->engine;
struct i915_vma *scratch;
struct i915_vma *batch;
- int err = 0, i, v;
+ int err = 0, i, v, sz;
u32 *cs, *results;
- scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
+ sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
+ scratch = __vm_create_scratch_for_read(ce->vm, sz);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
@@ -786,15 +787,15 @@ out:
return err;
}
-static int read_whitelisted_registers(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+static int read_whitelisted_registers(struct intel_context *ce,
struct i915_vma *results)
{
+ struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq;
int i, err = 0;
u32 srm, *cs;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -807,7 +808,7 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
goto err_req;
srm = MI_STORE_REGISTER_MEM;
- if (INTEL_GEN(ctx->i915) >= 8)
+ if (INTEL_GEN(engine->i915) >= 8)
srm++;
cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
@@ -834,18 +835,15 @@ err_req:
return request_add_sync(rq, err);
}
-static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static int scrub_whitelisted_registers(struct intel_context *ce)
{
- struct i915_address_space *vm;
+ struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq;
struct i915_vma *batch;
int i, err = 0;
u32 *cs;
- vm = i915_gem_context_get_vm_rcu(ctx);
- batch = create_batch(vm);
- i915_vm_put(vm);
+ batch = create_batch(ce->vm);
if (IS_ERR(batch))
return PTR_ERR(batch);
@@ -873,7 +871,7 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
i915_gem_object_flush_map(batch->obj);
intel_gt_chipset_flush(engine->gt);
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -1016,7 +1014,6 @@ static int live_isolated_whitelist(void *arg)
{
struct intel_gt *gt = arg;
struct {
- struct i915_gem_context *ctx;
struct i915_vma *scratch[2];
} client[2] = {};
struct intel_engine_cs *engine;
@@ -1032,61 +1029,57 @@ static int live_isolated_whitelist(void *arg)
return 0;
for (i = 0; i < ARRAY_SIZE(client); i++) {
- struct i915_address_space *vm;
- struct i915_gem_context *c;
-
- c = kernel_context(gt->i915);
- if (IS_ERR(c)) {
- err = PTR_ERR(c);
- goto err;
- }
-
- vm = i915_gem_context_get_vm_rcu(c);
-
- client[i].scratch[0] = create_scratch(vm, 1024);
+ client[i].scratch[0] =
+ __vm_create_scratch_for_read(gt->vm, 4096);
if (IS_ERR(client[i].scratch[0])) {
err = PTR_ERR(client[i].scratch[0]);
- i915_vm_put(vm);
- kernel_context_close(c);
goto err;
}
- client[i].scratch[1] = create_scratch(vm, 1024);
+ client[i].scratch[1] =
+ __vm_create_scratch_for_read(gt->vm, 4096);
if (IS_ERR(client[i].scratch[1])) {
err = PTR_ERR(client[i].scratch[1]);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
- i915_vm_put(vm);
- kernel_context_close(c);
goto err;
}
-
- client[i].ctx = c;
- i915_vm_put(vm);
}
for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2];
+
if (!engine->kernel_context->vm)
continue;
if (!whitelist_writable_count(engine))
continue;
+ ce[0] = intel_context_create(engine);
+ if (IS_ERR(ce[0])) {
+ err = PTR_ERR(ce[0]);
+ break;
+ }
+ ce[1] = intel_context_create(engine);
+ if (IS_ERR(ce[1])) {
+ err = PTR_ERR(ce[1]);
+ intel_context_put(ce[0]);
+ break;
+ }
+
/* Read default values */
- err = read_whitelisted_registers(client[0].ctx, engine,
- client[0].scratch[0]);
+ err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
if (err)
- goto err;
+ goto err_ce;
/* Try to overwrite registers (should only affect ctx0) */
- err = scrub_whitelisted_registers(client[0].ctx, engine);
+ err = scrub_whitelisted_registers(ce[0]);
if (err)
- goto err;
+ goto err_ce;
/* Read values from ctx1, we expect these to be defaults */
- err = read_whitelisted_registers(client[1].ctx, engine,
- client[1].scratch[0]);
+ err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
if (err)
- goto err;
+ goto err_ce;
/* Verify that both reads return the same default values */
err = check_whitelisted_registers(engine,
@@ -1094,31 +1087,29 @@ static int live_isolated_whitelist(void *arg)
client[1].scratch[0],
result_eq);
if (err)
- goto err;
+ goto err_ce;
/* Read back the updated values in ctx0 */
- err = read_whitelisted_registers(client[0].ctx, engine,
- client[0].scratch[1]);
+ err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
if (err)
- goto err;
+ goto err_ce;
/* User should be granted privilege to overwrite regs */
err = check_whitelisted_registers(engine,
client[0].scratch[0],
client[0].scratch[1],
result_neq);
+err_ce:
+ intel_context_put(ce[1]);
+ intel_context_put(ce[0]);
if (err)
- goto err;
+ break;
}
err:
for (i = 0; i < ARRAY_SIZE(client); i++) {
- if (!client[i].ctx)
- break;
-
i915_vma_unpin_and_release(&client[i].scratch[1], 0);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
- kernel_context_close(client[i].ctx);
}
if (igt_flush_test(gt->i915))
@@ -1128,18 +1119,21 @@ err:
}
static bool
-verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
+verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
const char *str)
{
- struct drm_i915_private *i915 = ctx->i915;
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
bool ok = true;
- ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
+ ok &= wa_list_verify(gt->uncore, &lists->gt_wa_list, str);
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
- for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
- enum intel_engine_id id = ce->engine->id;
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return false;
ok &= engine_wa_list_verify(ce,
&lists->engine[id].wa_list,
@@ -1148,6 +1142,8 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
ok &= engine_wa_list_verify(ce,
&lists->engine[id].ctx_wa_list,
str) == 0;
+
+ intel_context_put(ce);
}
return ok;
@@ -1157,7 +1153,6 @@ static int
live_gpu_reset_workarounds(void *arg)
{
struct intel_gt *gt = arg;
- struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
struct wa_lists lists;
bool ok;
@@ -1165,12 +1160,6 @@ live_gpu_reset_workarounds(void *arg)
if (!intel_has_gpu_reset(gt))
return 0;
- ctx = kernel_context(gt->i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- i915_gem_context_lock_engines(ctx);
-
pr_info("Verifying after GPU reset...\n");
igt_global_reset_lock(gt);
@@ -1178,17 +1167,15 @@ live_gpu_reset_workarounds(void *arg)
reference_lists_init(gt, &lists);
- ok = verify_wa_lists(ctx, &lists, "before reset");
+ ok = verify_wa_lists(gt, &lists, "before reset");
if (!ok)
goto out;
intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
- ok = verify_wa_lists(ctx, &lists, "after reset");
+ ok = verify_wa_lists(gt, &lists, "after reset");
out:
- i915_gem_context_unlock_engines(ctx);
- kernel_context_close(ctx);
reference_lists_fini(gt, &lists);
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
@@ -1200,8 +1187,8 @@ static int
live_engine_reset_workarounds(void *arg)
{
struct intel_gt *gt = arg;
- struct i915_gem_engines_iter it;
- struct i915_gem_context *ctx;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct intel_context *ce;
struct igt_spinner spin;
struct i915_request *rq;
@@ -1212,30 +1199,30 @@ live_engine_reset_workarounds(void *arg)
if (!intel_has_reset_engine(gt))
return 0;
- ctx = kernel_context(gt->i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
igt_global_reset_lock(gt);
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
reference_lists_init(gt, &lists);
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
- struct intel_engine_cs *engine = ce->engine;
+ for_each_engine(engine, gt, id) {
bool ok;
pr_info("Verifying after %s reset...\n", engine->name);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ break;
+ }
- ok = verify_wa_lists(ctx, &lists, "before reset");
+ ok = verify_wa_lists(gt, &lists, "before reset");
if (!ok) {
ret = -ESRCH;
goto err;
}
- intel_engine_reset(engine, "live_workarounds");
+ intel_engine_reset(engine, "live_workarounds:idle");
- ok = verify_wa_lists(ctx, &lists, "after idle reset");
+ ok = verify_wa_lists(gt, &lists, "after idle reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -1259,23 +1246,26 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- intel_engine_reset(engine, "live_workarounds");
+ intel_engine_reset(engine, "live_workarounds:active");
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
- ok = verify_wa_lists(ctx, &lists, "after busy reset");
+ ok = verify_wa_lists(gt, &lists, "after busy reset");
if (!ok) {
ret = -ESRCH;
goto err;
}
- }
+
err:
- i915_gem_context_unlock_engines(ctx);
+ intel_context_put(ce);
+ if (ret)
+ break;
+ }
+
reference_lists_fini(gt, &lists);
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
- kernel_context_close(ctx);
igt_flush_test(gt->i915);
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 5982b62f913d..a4d8fc9e2374 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -33,7 +33,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
struct file *file;
void *ptr;
- if (obj->ops == &i915_gem_shmem_ops) {
+ if (i915_gem_object_is_shmem(obj)) {
file = obj->base.filp;
atomic_long_inc(&file->f_count);
return file;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2a343a977987..4545e90e3bf1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -579,20 +579,8 @@ int intel_guc_reset_engine(struct intel_guc *guc,
*/
int intel_guc_resume(struct intel_guc *guc)
{
- u32 action[] = {
- INTEL_GUC_ACTION_EXIT_S_STATE,
- GUC_POWER_D0,
- };
-
- /*
- * If GuC communication is enabled but submission is not supported,
- * we do not need to resume the GuC but we do need to enable the
- * GuC communication on resume (above).
- */
- if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
- return 0;
-
- return intel_guc_send(guc, action, ARRAY_SIZE(action));
+ /* XXX: to be implemented with submission interface rework */
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index e84ab67b317d..bc2ba7d0626c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -47,13 +47,6 @@ struct intel_guc {
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
- struct i915_vma *workqueue;
- void *workqueue_vaddr;
- spinlock_t wq_lock;
-
- struct i915_vma *proc_desc;
- void *proc_desc_vaddr;
-
/* Control params for fw initialization */
u32 params[GUC_CTL_MAX_DWORDS];
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 5212ff844292..17526717368c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -4,6 +4,7 @@
*/
#include "gt/intel_gt.h"
+#include "gt/intel_lrc.h"
#include "intel_guc_ads.h"
#include "intel_uc.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index f9d0907ea1a5..2270d6b3b272 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -76,7 +76,6 @@ static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
static int guc_wait_ucode(struct intel_uncore *uncore)
{
- struct drm_device *drm = &uncore->i915->drm;
u32 status;
int ret;
@@ -89,11 +88,11 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
* attempt the ucode load again if this happens.)
*/
ret = wait_for(guc_ready(uncore, &status), 100);
- DRM_DEBUG_DRIVER("GuC status %#x\n", status);
-
if (ret) {
- drm_err(drm, "GuC load failed: status = 0x%08X\n", status);
- drm_err(drm, "GuC load failed: status: Reset = %d, "
+ struct drm_device *drm = &uncore->i915->drm;
+
+ drm_dbg(drm, "GuC load failed: status = 0x%08X\n", status);
+ drm_dbg(drm, "GuC load failed: status: Reset = %d, "
"BootROM = 0x%02X, UKernel = 0x%02X, "
"MIA = 0x%02X, Auth = 0x%02X\n",
REG_FIELD_GET(GS_MIA_IN_RESET, status),
@@ -103,12 +102,12 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
- drm_err(drm, "GuC firmware signature verification failed\n");
+ drm_dbg(drm, "GuC firmware signature verification failed\n");
ret = -ENOEXEC;
}
if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) {
- drm_err(drm, "GuC firmware exception. EIP: %#x\n",
+ drm_dbg(drm, "GuC firmware exception. EIP: %#x\n",
intel_uncore_read(uncore, SOFT_SCRATCH(13)));
ret = -ENXIO;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index fdfeb4b9b0f5..23dc0aeaa0ab 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -6,11 +6,14 @@
#include <linux/circ_buf.h>
#include "gem/i915_gem_context.h"
+#include "gt/gen8_engine_cs.h"
+#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
-#include "gt/intel_lrc_reg.h"
+#include "gt/intel_lrc.h"
+#include "gt/intel_mocs.h"
#include "gt/intel_ring.h"
#include "intel_guc_submission.h"
@@ -54,6 +57,8 @@
*
*/
+#define GUC_REQUEST_SIZE 64 /* bytes */
+
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
@@ -66,58 +71,6 @@ static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
return &base[id];
}
-static int guc_workqueue_create(struct intel_guc *guc)
-{
- return intel_guc_allocate_and_map_vma(guc, GUC_WQ_SIZE, &guc->workqueue,
- &guc->workqueue_vaddr);
-}
-
-static void guc_workqueue_destroy(struct intel_guc *guc)
-{
- i915_vma_unpin_and_release(&guc->workqueue, I915_VMA_RELEASE_MAP);
-}
-
-/*
- * Initialise the process descriptor shared with the GuC firmware.
- */
-static int guc_proc_desc_create(struct intel_guc *guc)
-{
- const u32 size = PAGE_ALIGN(sizeof(struct guc_process_desc));
-
- return intel_guc_allocate_and_map_vma(guc, size, &guc->proc_desc,
- &guc->proc_desc_vaddr);
-}
-
-static void guc_proc_desc_destroy(struct intel_guc *guc)
-{
- i915_vma_unpin_and_release(&guc->proc_desc, I915_VMA_RELEASE_MAP);
-}
-
-static void guc_proc_desc_init(struct intel_guc *guc)
-{
- struct guc_process_desc *desc;
-
- desc = memset(guc->proc_desc_vaddr, 0, sizeof(*desc));
-
- /*
- * XXX: pDoorbell and WQVBaseAddress are pointers in process address
- * space for ring3 clients (set them as in mmap_ioctl) or kernel
- * space for kernel clients (map on demand instead? May make debug
- * easier to have it mapped).
- */
- desc->wq_base_addr = 0;
- desc->db_base_addr = 0;
-
- desc->wq_size_bytes = GUC_WQ_SIZE;
- desc->wq_status = WQ_STATUS_ACTIVE;
- desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
-}
-
-static void guc_proc_desc_fini(struct intel_guc *guc)
-{
- memset(guc->proc_desc_vaddr, 0, sizeof(struct guc_process_desc));
-}
-
static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
@@ -153,8 +106,6 @@ static void guc_stage_desc_init(struct intel_guc *guc)
desc->stage_id = 0;
desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
- desc->process_desc = intel_guc_ggtt_offset(guc, guc->proc_desc);
- desc->wq_addr = intel_guc_ggtt_offset(guc, guc->workqueue);
desc->wq_size = GUC_WQ_SIZE;
}
@@ -166,62 +117,9 @@ static void guc_stage_desc_fini(struct intel_guc *guc)
memset(desc, 0, sizeof(*desc));
}
-/* Construct a Work Item and append it to the GuC's Work Queue */
-static void guc_wq_item_append(struct intel_guc *guc,
- u32 target_engine, u32 context_desc,
- u32 ring_tail, u32 fence_id)
-{
- /* wqi_len is in DWords, and does not include the one-word header */
- const size_t wqi_size = sizeof(struct guc_wq_item);
- const u32 wqi_len = wqi_size / sizeof(u32) - 1;
- struct guc_process_desc *desc = guc->proc_desc_vaddr;
- struct guc_wq_item *wqi;
- u32 wq_off;
-
- lockdep_assert_held(&guc->wq_lock);
-
- /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
-	 * should never see a wqi structure that crosses a page boundary, nor one
-	 * that wraps back to the beginning. This simplifies the implementation below.
- *
- * XXX: if not the case, we need save data to a temp wqi and copy it to
- * workqueue buffer dw by dw.
- */
- BUILD_BUG_ON(wqi_size != 16);
-
- /* We expect the WQ to be active if we're appending items to it */
- GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);
-
- /* Free space is guaranteed. */
- wq_off = READ_ONCE(desc->tail);
- GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
- GUC_WQ_SIZE) < wqi_size);
- GEM_BUG_ON(wq_off & (wqi_size - 1));
-
- wqi = guc->workqueue_vaddr + wq_off;
-
- /* Now fill in the 4-word work queue item */
- wqi->header = WQ_TYPE_INORDER |
- (wqi_len << WQ_LEN_SHIFT) |
- (target_engine << WQ_TARGET_SHIFT) |
- WQ_NO_WCFLUSH_WAIT;
- wqi->context_desc = context_desc;
- wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
- GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
- wqi->fence_id = fence_id;
-
- /* Make the update visible to GuC */
- WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
-}
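The deleted guc_wq_item_append() relied on two invariants spelled out in its comments: work items are a fixed 16 bytes, and the queue size is a power of two so the tail wraps with a simple mask rather than a compare. A minimal standalone sketch of that arithmetic (sizes here are assumptions for illustration, not the real GUC_WQ_SIZE):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define WQ_SIZE   4096u		/* power of two, so wrap is a mask */
#define WQI_SIZE  16u		/* fixed-size item, like the 4-DW wq item */

static uint8_t workqueue[WQ_SIZE];
static uint32_t tail;

static void wq_item_append(const void *item)
{
	memcpy(&workqueue[tail], item, WQI_SIZE);
	tail = (tail + WQI_SIZE) & (WQ_SIZE - 1);	/* wrap without branching */
}

int main(void)
{
	uint8_t item[WQI_SIZE] = { 0 };

	for (int i = 0; i < 300; i++)	/* more items than fit in one pass */
		wq_item_append(item);
	printf("tail after 300 appends: %u\n", tail);	/* 300 * 16 mod 4096 = 704 */
	return 0;
}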
-
static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
- struct intel_engine_cs *engine = rq->engine;
- u32 ctx_desc = rq->context->lrc.ccid;
- u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
-
- guc_wq_item_append(guc, engine->guc_id, ctx_desc,
- ring_tail, rq->fence.seqno);
+ /* Leaving stub as this function will be used in future patches */
}
/*
@@ -244,16 +142,12 @@ static void guc_submit(struct intel_engine_cs *engine,
{
struct intel_guc *guc = &engine->gt->uc.guc;
- spin_lock(&guc->wq_lock);
-
do {
struct i915_request *rq = *out++;
flush_ggtt_writes(rq->ring->vma);
guc_add_request(guc, rq);
} while (out != end);
-
- spin_unlock(&guc->wq_lock);
}
static inline int rq_prio(const struct i915_request *rq)
@@ -388,17 +282,26 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
__tasklet_disable_sync_once(&execlists->tasklet);
}
-static void
-cancel_port_requests(struct intel_engine_execlists * const execlists)
+static void guc_reset_state(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ u32 head,
+ bool scrub)
{
- struct i915_request * const *port, *rq;
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
- /* Note we are only using the inflight and not the pending queue */
+ /*
+ * We want a simple context + ring to execute the breadcrumb update.
+ * We cannot rely on the context being intact across the GPU hang,
+ * so clear it and rebuild just what we need for the breadcrumb.
+ * All pending requests for this context will be zapped, and any
+ * future request will be after userspace has had the opportunity
+ * to recreate its own state.
+ */
+ if (scrub)
+ lrc_init_regs(ce, engine, true);
- for (port = execlists->active; (rq = *port); port++)
- schedule_out(rq);
- execlists->active =
- memset(execlists->inflight, 0, sizeof(execlists->inflight));
+ /* Rerun the request; its payload has been neutered (if guilty). */
+ lrc_update_regs(ce, engine, head);
}
static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
@@ -409,8 +312,6 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
spin_lock_irqsave(&engine->active.lock, flags);
- cancel_port_requests(execlists);
-
/* Push back any incomplete requests for replay after the reset. */
rq = execlists_unwind_incomplete_requests(execlists);
if (!rq)
@@ -420,7 +321,7 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
stalled = false;
__i915_request_reset(rq, stalled);
- intel_lr_context_reset(engine, rq->context, rq->head, stalled);
+ guc_reset_state(rq->context, engine, rq->head, stalled);
out_unlock:
spin_unlock_irqrestore(&engine->active.lock, flags);
@@ -451,9 +352,6 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
*/
spin_lock_irqsave(&engine->active.lock, flags);
- /* Cancel the requests on the HW and clear the ELSP tracker. */
- cancel_port_requests(execlists);
-
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link) {
i915_request_set_error_once(rq, -EIO);
@@ -497,12 +395,6 @@ static void guc_reset_finish(struct intel_engine_cs *engine)
}
/*
- * Everything below here is concerned with setup & teardown, and is
- * therefore not part of the somewhat time-critical batch-submission
- * path of guc_submit() above.
- */
-
-/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
*/
@@ -522,30 +414,12 @@ int intel_guc_submission_init(struct intel_guc *guc)
*/
GEM_BUG_ON(!guc->stage_desc_pool);
- ret = guc_workqueue_create(guc);
- if (ret)
- goto err_pool;
-
- ret = guc_proc_desc_create(guc);
- if (ret)
- goto err_workqueue;
-
- spin_lock_init(&guc->wq_lock);
-
return 0;
-
-err_workqueue:
- guc_workqueue_destroy(guc);
-err_pool:
- guc_stage_desc_pool_destroy(guc);
- return ret;
}
void intel_guc_submission_fini(struct intel_guc *guc)
{
if (guc->stage_desc_pool) {
- guc_proc_desc_destroy(guc);
- guc_workqueue_destroy(guc);
guc_stage_desc_pool_destroy(guc);
}
}
@@ -576,33 +450,186 @@ static void guc_interrupts_release(struct intel_gt *gt)
intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
}
-static void guc_set_default_submission(struct intel_engine_cs *engine)
+static int guc_context_alloc(struct intel_context *ce)
+{
+ return lrc_alloc(ce, ce->engine);
+}
+
+static int guc_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ return lrc_pre_pin(ce, ce->engine, ww, vaddr);
+}
+
+static int guc_context_pin(struct intel_context *ce, void *vaddr)
{
+ return lrc_pin(ce, ce->engine, vaddr);
+}
+
+static const struct intel_context_ops guc_context_ops = {
+ .alloc = guc_context_alloc,
+
+ .pre_pin = guc_context_pre_pin,
+ .pin = guc_context_pin,
+ .unpin = lrc_unpin,
+ .post_unpin = lrc_post_unpin,
+
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
+ .reset = lrc_reset,
+ .destroy = lrc_destroy,
+};
+
+static int guc_request_alloc(struct i915_request *request)
+{
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
+
/*
- * We inherit a bunch of functions from execlists that we'd like
- * to keep using:
- *
- * engine->submit_request = execlists_submit_request;
- * engine->cancel_requests = execlists_cancel_requests;
- * engine->schedule = execlists_schedule;
+ * Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ request->reserved_space += GUC_REQUEST_SIZE;
+
+ /*
+ * Note that after this point, we have committed to using
+ * this request as it is being used to both track the
+ * state of engine initialisation and liveness of the
+ * golden renderstate above. Think twice before you try
+ * to cancel/unwind this request now.
+ */
+
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ if (ret)
+ return ret;
+
+ request->reserved_space -= GUC_REQUEST_SIZE;
+ return 0;
+}
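guc_request_alloc() bumps request->reserved_space before emitting anything so that the later stages of request construction never stall waiting for ring space, then returns the reservation once the request is built. A hedged, self-contained sketch of that reserve-then-return idea (toy structures only; GUC_REQUEST_SIZE is merely echoed as a constant):

#include <stdio.h>

struct toy_ring { unsigned int space; };

static int toy_emit(struct toy_ring *ring, unsigned int dwords)
{
	if (ring->space < dwords)
		return -1;	/* would have to wait for space */
	ring->space -= dwords;
	return 0;
}

int main(void)
{
	struct toy_ring ring = { .space = 256 };
	const unsigned int reserved = 64;	/* like GUC_REQUEST_SIZE */

	/* Claim the reservation up front so later emits cannot stall. */
	if (toy_emit(&ring, reserved))
		return 1;

	/* ... build the request body ... */
	toy_emit(&ring, 16);

	/* Give the reservation back once construction is complete. */
	ring.space += reserved;
	printf("space left: %u\n", ring.space);
	return 0;
}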
+
+static inline void queue_request(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ int prio)
+{
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ list_add_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(engine, prio));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
+static void guc_submit_request(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ queue_request(engine, rq, rq_prio(rq));
+
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(list_empty(&rq->sched.link));
+
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
+}
+
+static void guc_sanitize(struct intel_engine_cs *engine)
+{
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
*
- * But we need to override the actual submission backend in order
- * to talk to the GuC.
+ * We have to assume that across suspend/resume (or other loss
+	 * of control) that the contents of our pinned buffers have been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
*/
- intel_execlists_set_default_submission(engine);
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
- engine->execlists.tasklet.func = guc_submission_tasklet;
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ sanitize_hwsp(engine);
- /* do not use execlists park/unpark */
- engine->park = engine->unpark = NULL;
+ /* And scrub the dirty cachelines for the HWSP */
+ clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
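+
Poisoning the status page in debug builds makes a forgotten reinitialisation fail loudly instead of silently reusing stale HWSP contents. A tiny standalone sketch of the same poison-and-detect idea (status_page and sanitize() are made-up names; POISON_INUSE is the 0x5a pattern from include/linux/poison.h):

#include <stdio.h>
#include <string.h>

#define POISON_INUSE	0x5a	/* kernel's "in use" poison byte */
#define PAGE_SZ		4096

static unsigned char status_page[PAGE_SZ];

/* Overwrite the page so anything trusting stale contents trips immediately. */
static void sanitize(void)
{
	memset(status_page, POISON_INUSE, sizeof(status_page));
}

int main(void)
{
	sanitize();
	printf("first byte after sanitize: 0x%02x\n", status_page[0]);
	return 0;
}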
+static void setup_hwsp(struct intel_engine_cs *engine)
+{
+ intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
+
+ ENGINE_WRITE_FW(engine,
+ RING_HWS_PGA,
+ i915_ggtt_offset(engine->status_page.vma));
+}
+
+static void start_engine(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE_FW(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_POSTING_READ(engine, RING_MI_MODE);
+}
+
+static int guc_resume(struct intel_engine_cs *engine)
+{
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
+ intel_mocs_init_engine(engine);
+
+ intel_breadcrumbs_reset(engine->breadcrumbs);
+
+ setup_hwsp(engine);
+ start_engine(engine);
+
+ return 0;
+}
+
+static void guc_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = guc_submit_request;
+ engine->schedule = i915_schedule;
+ engine->execlists.tasklet.func = guc_submission_tasklet;
engine->reset.prepare = guc_reset_prepare;
engine->reset.rewind = guc_reset_rewind;
engine->reset.cancel = guc_reset_cancel;
engine->reset.finish = guc_reset_finish;
- engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+
+ /*
+ * TODO: GuC supports timeslicing and semaphores as well, but they're
+ * handled by the firmware so some minor tweaks are required before
+ * enabling.
+ *
+ * engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ */
+
+ engine->emit_bb_start = gen8_emit_bb_start;
/*
* For the breadcrumb irq to work we need the interrupts to stay
@@ -613,35 +640,92 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}
-void intel_guc_submission_enable(struct intel_guc *guc)
+static void guc_release(struct intel_engine_cs *engine)
{
- struct intel_gt *gt = guc_to_gt(guc);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
+ tasklet_kill(&engine->execlists.tasklet);
+
+ intel_engine_cleanup_common(engine);
+ lrc_fini_wa_ctx(engine);
+}
+
+static void guc_default_vfuncs(struct intel_engine_cs *engine)
+{
+ /* Default vfuncs which can be overridden by each engine. */
+
+ engine->resume = guc_resume;
+
+ engine->cops = &guc_context_ops;
+ engine->request_alloc = guc_request_alloc;
+
+ engine->emit_flush = gen8_emit_flush_xcs;
+ engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
+ if (INTEL_GEN(engine->i915) >= 12) {
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
+ engine->emit_flush = gen12_emit_flush_xcs;
+ }
+ engine->set_default_submission = guc_set_default_submission;
+}
+
+static void rcs_submission_override(struct intel_engine_cs *engine)
+{
+ switch (INTEL_GEN(engine->i915)) {
+ case 12:
+ engine->emit_flush = gen12_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+ break;
+ case 11:
+ engine->emit_flush = gen11_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
+ break;
+ default:
+ engine->emit_flush = gen8_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+ break;
+ }
+}
+
+static inline void guc_default_irqs(struct intel_engine_cs *engine)
+{
+ engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
+}
+
+int intel_guc_submission_setup(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
/*
- * We're using GuC work items for submitting work through GuC. Since
- * we're coalescing multiple requests from a single context into a
- * single work item prior to assigning it to execlist_port, we can
- * never have more work items than the total number of ports (for all
- * engines). The GuC firmware is controlling the HEAD of work queue,
- * and it is guaranteed that it will remove the work item from the
- * queue before our request is completed.
+ * The setup relies on several assumptions (e.g. irqs always enabled)
+ * that are only valid on gen11+
*/
- BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
- sizeof(struct guc_wq_item) *
- I915_NUM_ENGINES > GUC_WQ_SIZE);
+ GEM_BUG_ON(INTEL_GEN(i915) < 11);
+
+ tasklet_init(&engine->execlists.tasklet,
+ guc_submission_tasklet, (unsigned long)engine);
+
+ guc_default_vfuncs(engine);
+ guc_default_irqs(engine);
- guc_proc_desc_init(guc);
+ if (engine->class == RENDER_CLASS)
+ rcs_submission_override(engine);
+
+ lrc_init_wa_ctx(engine);
+
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->sanitize = guc_sanitize;
+ engine->release = guc_release;
+
+ return 0;
+}
+
+void intel_guc_submission_enable(struct intel_guc *guc)
+{
guc_stage_desc_init(guc);
/* Take over from manual control of ELSP (execlists) */
- guc_interrupts_capture(gt);
-
- for_each_engine(engine, gt, id) {
- engine->set_default_submission = guc_set_default_submission;
- engine->set_default_submission(engine);
- }
+ guc_interrupts_capture(guc_to_gt(guc));
}
void intel_guc_submission_disable(struct intel_guc *guc)
@@ -655,7 +739,6 @@ void intel_guc_submission_disable(struct intel_guc *guc)
guc_interrupts_release(gt);
guc_stage_desc_fini(guc);
- guc_proc_desc_fini(guc);
}
static bool __guc_submission_selected(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 4cf9d3e50263..5f7b9e6347d0 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -19,6 +19,7 @@ void intel_guc_submission_disable(struct intel_guc *guc);
void intel_guc_submission_fini(struct intel_guc *guc);
int intel_guc_preempt_work_create(struct intel_guc *guc);
void intel_guc_preempt_work_destroy(struct intel_guc *guc);
+int intel_guc_submission_setup(struct intel_engine_cs *engine);
bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 4e6070e95fe9..6abb8f2dc33d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -15,6 +15,29 @@
static const struct intel_uc_ops uc_ops_off;
static const struct intel_uc_ops uc_ops_on;
+static void uc_expand_default_options(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+
+ if (i915->params.enable_guc != -1)
+ return;
+
+ /* Don't enable GuC/HuC on pre-Gen12 */
+ if (INTEL_GEN(i915) < 12) {
+ i915->params.enable_guc = 0;
+ return;
+ }
+
+ /* Don't enable GuC/HuC on older Gen12 platforms */
+ if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
+ i915->params.enable_guc = 0;
+ return;
+ }
+
+ /* Default: enable HuC authentication only */
+ i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
+}
+
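+
uc_expand_default_options() resolves the "auto" value (-1) of the enable_guc modparam into a concrete choice before anything else looks at it, which is why __confirm_options() can drop its early return for -1 below. A small standalone sketch of the same decision table (the gen/is_early_gen12 parameters and the value 2 for ENABLE_GUC_LOAD_HUC are assumptions for illustration):

#include <stdio.h>

#define ENABLE_GUC_LOAD_HUC	2	/* assumed value, for illustration only */

/* Resolve enable_guc == -1 ("auto") into a concrete default. */
static int expand_default_options(int enable_guc, int gen, int is_early_gen12)
{
	if (enable_guc != -1)
		return enable_guc;		/* explicit user choice wins */
	if (gen < 12 || is_early_gen12)
		return 0;			/* no GuC/HuC by default */
	return ENABLE_GUC_LOAD_HUC;		/* default: HuC authentication only */
}

int main(void)
{
	printf("%d %d %d\n",
	       expand_default_options(-1, 11, 0),	/* pre-Gen12: 0 */
	       expand_default_options(-1, 12, 1),	/* TGL/RKL-like parts: 0 */
	       expand_default_options(-1, 12, 0));	/* otherwise: 2 */
	return 0;
}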
/* Reset GuC providing us with fresh state for both GuC and HuC.
*/
static int __intel_uc_reset_hw(struct intel_uc *uc)
@@ -52,9 +75,6 @@ static void __confirm_options(struct intel_uc *uc)
yesno(intel_uc_wants_guc_submission(uc)),
yesno(intel_uc_wants_huc(uc)));
- if (i915->params.enable_guc == -1)
- return;
-
if (i915->params.enable_guc == 0) {
GEM_BUG_ON(intel_uc_wants_guc(uc));
GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
@@ -79,8 +99,7 @@ static void __confirm_options(struct intel_uc *uc)
"Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "GuC submission is N/A");
- if (i915->params.enable_guc & ~(ENABLE_GUC_SUBMISSION |
- ENABLE_GUC_LOAD_HUC))
+ if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "undocumented flag");
@@ -88,6 +107,8 @@ static void __confirm_options(struct intel_uc *uc)
void intel_uc_init_early(struct intel_uc *uc)
{
+ uc_expand_default_options(uc);
+
intel_guc_init_early(&uc->guc);
intel_huc_init_early(&uc->huc);
@@ -175,19 +196,15 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
static void guc_handle_mmio_msg(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
-
/* we need communication to be enabled to reply to GuC */
GEM_BUG_ON(!guc_communication_enabled(guc));
- if (!guc->mmio_msg)
- return;
-
- spin_lock_irq(&i915->irq_lock);
- intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
- spin_unlock_irq(&i915->irq_lock);
-
- guc->mmio_msg = 0;
+ spin_lock_irq(&guc->irq_lock);
+ if (guc->mmio_msg) {
+ intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
+ guc->mmio_msg = 0;
+ }
+ spin_unlock_irq(&guc->irq_lock);
}
static void guc_reset_interrupts(struct intel_guc *guc)
@@ -207,7 +224,8 @@ static void guc_disable_interrupts(struct intel_guc *guc)
static int guc_enable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = gt->i915;
int ret;
GEM_BUG_ON(guc_communication_enabled(guc));
@@ -227,9 +245,9 @@ static int guc_enable_communication(struct intel_guc *guc)
guc_enable_interrupts(guc);
/* check for CT messages received before we enabled interrupts */
- spin_lock_irq(&i915->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
intel_guc_ct_event_handler(&guc->ct);
- spin_unlock_irq(&i915->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
drm_dbg(&i915->drm, "GuC communication enabled\n");
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 180c23e2e25e..67b06fde1225 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -53,6 +53,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, guc_def(icl, 49, 0, 1), huc_def(icl, 9, 0, 0)) \
fw_def(COMETLAKE, 5, guc_def(cml, 49, 0, 1), huc_def(cml, 4, 0, 0)) \
+ fw_def(COMETLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
fw_def(COFFEELAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
fw_def(GEMINILAKE, 0, guc_def(glk, 49, 0, 1), huc_def(glk, 4, 0, 0)) \
fw_def(KABYLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
@@ -151,16 +152,11 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
uc_fw->path = NULL;
}
}
-
- /* We don't want to enable GuC/HuC on pre-Gen11 by default */
- if (i915->params.enable_guc == -1 && p < INTEL_ICELAKE)
- uc_fw->path = NULL;
}
static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
{
- if (i915->params.enable_guc & (ENABLE_GUC_SUBMISSION |
- ENABLE_GUC_LOAD_HUC))
+ if (i915->params.enable_guc & ENABLE_GUC_MASK)
return i915->params.guc_firmware_path;
return "";
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 16b582cb97ed..7fb91de06557 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -37,11 +37,18 @@
#include <linux/slab.h>
#include "i915_drv.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
+#include "gt/intel_gt_requests.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_context.h"
+
#define INVALID_OP (~0U)
#define OP_LEN_MI 9
@@ -454,6 +461,7 @@ enum {
RING_BUFFER_INSTRUCTION,
BATCH_BUFFER_INSTRUCTION,
BATCH_BUFFER_2ND_LEVEL,
+ RING_BUFFER_CTX,
};
enum {
@@ -495,6 +503,7 @@ struct parser_exec_state {
*/
int saved_buf_addr_type;
bool is_ctx_wa;
+ bool is_init_ctx;
const struct cmd_info *info;
@@ -708,6 +717,11 @@ static inline u32 cmd_val(struct parser_exec_state *s, int index)
return *cmd_ptr(s, index);
}
+static inline bool is_init_ctx(struct parser_exec_state *s)
+{
+ return (s->buf_type == RING_BUFFER_CTX && s->is_init_ctx);
+}
+
static void parser_exec_state_dump(struct parser_exec_state *s)
{
int cnt = 0;
@@ -721,7 +735,8 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
- "RING_BUFFER" : "BATCH_BUFFER",
+ "RING_BUFFER" : ((s->buf_type == RING_BUFFER_CTX) ?
+ "CTX_BUFFER" : "BATCH_BUFFER"),
s->buf_addr_type == GTT_BUFFER ?
"GTT" : "PPGTT", s->ip_gma);
@@ -756,7 +771,8 @@ static inline void update_ip_va(struct parser_exec_state *s)
if (WARN_ON(s->ring_head == s->ring_tail))
return;
- if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (s->buf_type == RING_BUFFER_INSTRUCTION ||
+ s->buf_type == RING_BUFFER_CTX) {
unsigned long ring_top = s->ring_start + s->ring_size;
if (s->ring_head > s->ring_tail) {
@@ -820,68 +836,12 @@ static inline int cmd_length(struct parser_exec_state *s)
*addr = val; \
} while (0)
-static bool is_shadowed_mmio(unsigned int offset)
-{
- bool ret = false;
-
- if ((offset == 0x2168) || /*BB current head register UDW */
- (offset == 0x2140) || /*BB current header register */
- (offset == 0x211c) || /*second BB header register UDW */
- (offset == 0x2114)) { /*second BB header register UDW */
- ret = true;
- }
- return ret;
-}
-
-static inline bool is_force_nonpriv_mmio(unsigned int offset)
-{
- return (offset >= 0x24d0 && offset < 0x2500);
-}
-
-static int force_nonpriv_reg_handler(struct parser_exec_state *s,
- unsigned int offset, unsigned int index, char *cmd)
-{
- struct intel_gvt *gvt = s->vgpu->gvt;
- unsigned int data;
- u32 ring_base;
- u32 nopid;
-
- if (!strcmp(cmd, "lri"))
- data = cmd_val(s, index + 1);
- else {
- gvt_err("Unexpected forcenonpriv 0x%x write from cmd %s\n",
- offset, cmd);
- return -EINVAL;
- }
-
- ring_base = s->engine->mmio_base;
- nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
-
- if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
- data != nopid) {
- gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
- offset, data);
- patch_value(s, cmd_ptr(s, index), nopid);
- return 0;
- }
- return 0;
-}
-
static inline bool is_mocs_mmio(unsigned int offset)
{
return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
((offset >= 0xb020) && (offset <= 0xb0a0));
}
-static int mocs_cmd_reg_handler(struct parser_exec_state *s,
- unsigned int offset, unsigned int index)
-{
- if (!is_mocs_mmio(offset))
- return -EINVAL;
- vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1);
- return 0;
-}
-
static int is_cmd_update_pdps(unsigned int offset,
struct parser_exec_state *s)
{
@@ -929,6 +889,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
struct intel_vgpu *vgpu = s->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
u32 ctx_sr_ctl;
+ u32 *vreg, vreg_old;
if (offset + 4 > gvt->device_info.mmio_size) {
gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -936,34 +897,101 @@ static int cmd_reg_handler(struct parser_exec_state *s,
return -EFAULT;
}
+ if (is_init_ctx(s)) {
+ struct intel_gvt_mmio_info *mmio_info;
+
+ intel_gvt_mmio_set_cmd_accessible(gvt, offset);
+ mmio_info = intel_gvt_find_mmio_info(gvt, offset);
+ if (mmio_info && mmio_info->write)
+ intel_gvt_mmio_set_cmd_write_patch(gvt, offset);
+ return 0;
+ }
+
if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
gvt_vgpu_err("%s access to non-render register (%x)\n",
cmd, offset);
return -EBADRQC;
}
- if (is_shadowed_mmio(offset)) {
- gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
- return 0;
+ if (!strncmp(cmd, "srm", 3) ||
+ !strncmp(cmd, "lrm", 3)) {
+ if (offset != i915_mmio_reg_offset(GEN8_L3SQCREG4) &&
+ offset != 0x21f0) {
+ gvt_vgpu_err("%s access to register (%x)\n",
+ cmd, offset);
+ return -EPERM;
+ } else
+ return 0;
}
- if (is_mocs_mmio(offset) &&
- mocs_cmd_reg_handler(s, offset, index))
- return -EINVAL;
+ if (!strncmp(cmd, "lrr-src", 7) ||
+ !strncmp(cmd, "lrr-dst", 7)) {
+ gvt_vgpu_err("not allowed cmd %s\n", cmd);
+ return -EPERM;
+ }
+
+ if (!strncmp(cmd, "pipe_ctrl", 9)) {
+ /* TODO: add LRI POST logic here */
+ return 0;
+ }
- if (is_force_nonpriv_mmio(offset) &&
- force_nonpriv_reg_handler(s, offset, index, cmd))
+ if (strncmp(cmd, "lri", 3))
return -EPERM;
+ /* below are all lri handlers */
+ vreg = &vgpu_vreg(s->vgpu, offset);
+ if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
+ gvt_vgpu_err("%s access to non-render register (%x)\n",
+ cmd, offset);
+ return -EBADRQC;
+ }
+
+ if (is_cmd_update_pdps(offset, s) &&
+ cmd_pdp_mmio_update_handler(s, offset, index))
+ return -EINVAL;
+
if (offset == i915_mmio_reg_offset(DERRMR) ||
offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
}
- if (is_cmd_update_pdps(offset, s) &&
- cmd_pdp_mmio_update_handler(s, offset, index))
- return -EINVAL;
+ if (is_mocs_mmio(offset))
+ *vreg = cmd_val(s, index + 1);
+
+ vreg_old = *vreg;
+
+ if (intel_gvt_mmio_is_cmd_write_patch(gvt, offset)) {
+ u32 cmdval_new, cmdval;
+ struct intel_gvt_mmio_info *mmio_info;
+
+ cmdval = cmd_val(s, index + 1);
+
+ mmio_info = intel_gvt_find_mmio_info(gvt, offset);
+ if (!mmio_info) {
+ cmdval_new = cmdval;
+ } else {
+ u64 ro_mask = mmio_info->ro_mask;
+ int ret;
+
+ if (likely(!ro_mask))
+ ret = mmio_info->write(s->vgpu, offset,
+ &cmdval, 4);
+ else {
+ gvt_vgpu_err("try to write RO reg %x\n",
+ offset);
+ ret = -EBADRQC;
+ }
+ if (ret)
+ return ret;
+ cmdval_new = *vreg;
+ }
+ if (cmdval_new != cmdval)
+ patch_value(s, cmd_ptr(s, index+1), cmdval_new);
+ }
+
+ /* only patch the cmd; restore the vreg value if the mmio write handler changed it */
+ *vreg = vreg_old;
/* TODO
* In order to let workload with inhibit context to generate
@@ -1215,6 +1243,8 @@ static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
s->buf_type = BATCH_BUFFER_INSTRUCTION;
ret = ip_gma_set(s, s->ret_ip_gma_bb);
s->buf_addr_type = s->saved_buf_addr_type;
+ } else if (s->buf_type == RING_BUFFER_CTX) {
+ ret = ip_gma_set(s, s->ring_tail);
} else {
s->buf_type = RING_BUFFER_INSTRUCTION;
s->buf_addr_type = GTT_BUFFER;
@@ -2763,7 +2793,8 @@ static int command_scan(struct parser_exec_state *s,
gma_bottom = rb_start + rb_len;
while (s->ip_gma != gma_tail) {
- if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (s->buf_type == RING_BUFFER_INSTRUCTION ||
+ s->buf_type == RING_BUFFER_CTX) {
if (!(s->ip_gma >= rb_start) ||
!(s->ip_gma < gma_bottom)) {
gvt_vgpu_err("ip_gma %lx out of ring scope."
@@ -3056,6 +3087,171 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
+/* Generate dummy contexts by sending empty requests to the HW, and let
+ * the HW fill in the engine contexts. These dummy contexts are used for
+ * initialization purposes (updating the reg whitelist), so they are
+ * referred to as init contexts here.
+ */
+void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
+ struct i915_request *rq;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_request *requests[I915_NUM_ENGINES] = {};
+ bool is_ctx_pinned[I915_NUM_ENGINES] = {};
+ int ret;
+
+ if (gvt->is_reg_whitelist_updated)
+ return;
+
+ for_each_engine(engine, &dev_priv->gt, id) {
+ ret = intel_context_pin(s->shadow[id]);
+ if (ret) {
+ gvt_vgpu_err("fail to pin shadow ctx\n");
+ goto out;
+ }
+ is_ctx_pinned[id] = true;
+
+ rq = i915_request_create(s->shadow[id]);
+ if (IS_ERR(rq)) {
+ gvt_vgpu_err("fail to alloc default request\n");
+ ret = -EIO;
+ goto out;
+ }
+ requests[id] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
+ if (intel_gt_wait_for_idle(&dev_priv->gt,
+ I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /* scan init ctx to update cmd accessible list */
+ for_each_engine(engine, &dev_priv->gt, id) {
+ int size = engine->context_size - PAGE_SIZE;
+ void *vaddr;
+ struct parser_exec_state s;
+ struct drm_i915_gem_object *obj;
+ struct i915_request *rq;
+
+ rq = requests[id];
+ GEM_BUG_ON(!i915_request_completed(rq));
+ GEM_BUG_ON(!intel_context_is_pinned(rq->context));
+ obj = rq->context->state->obj;
+
+ if (!obj) {
+ ret = -EIO;
+ goto out;
+ }
+
+ i915_gem_object_set_cache_coherency(obj,
+ I915_CACHE_LLC);
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ gvt_err("failed to pin init ctx obj, ring=%d, err=%lx\n",
+ id, PTR_ERR(vaddr));
+ goto out;
+ }
+
+ s.buf_type = RING_BUFFER_CTX;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = vgpu;
+ s.engine = engine;
+ s.ring_start = 0;
+ s.ring_size = size;
+ s.ring_head = 0;
+ s.ring_tail = size;
+ s.rb_va = vaddr + start;
+ s.workload = NULL;
+ s.is_ctx_wa = false;
+ s.is_init_ctx = true;
+
+ /* skip the first RING_CTX_SIZE bytes (0x50 dwords) */
+ ret = ip_gma_set(&s, RING_CTX_SIZE);
+ if (ret) {
+ i915_gem_object_unpin_map(obj);
+ goto out;
+ }
+
+ ret = command_scan(&s, 0, size, 0, size);
+ if (ret)
+ gvt_err("Scan init ctx error\n");
+
+ i915_gem_object_unpin_map(obj);
+ }
+
+out:
+ if (!ret)
+ gvt->is_reg_whitelist_updated = true;
+
+ for (id = 0; id < I915_NUM_ENGINES ; id++) {
+ if (requests[id])
+ i915_request_put(requests[id]);
+
+ if (is_ctx_pinned[id])
+ intel_context_unpin(s->shadow[id]);
+ }
+}
+
+int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ unsigned long gma_head, gma_tail, gma_start, ctx_size;
+ struct parser_exec_state s;
+ int ring_id = workload->engine->id;
+ struct intel_context *ce = vgpu->submission.shadow[ring_id];
+ int ret;
+
+ GEM_BUG_ON(atomic_read(&ce->pin_count) < 0);
+
+ ctx_size = workload->engine->context_size - PAGE_SIZE;
+
+ /* Only the ring context is loaded to HW for an inhibit context; no need
+ * to scan the engine context.
+ */
+ if (is_inhibit_context(ce))
+ return 0;
+
+ gma_start = i915_ggtt_offset(ce->state) + LRC_STATE_PN*PAGE_SIZE;
+ gma_head = 0;
+ gma_tail = ctx_size;
+
+ s.buf_type = RING_BUFFER_CTX;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = workload->vgpu;
+ s.engine = workload->engine;
+ s.ring_start = gma_start;
+ s.ring_size = ctx_size;
+ s.ring_head = gma_start + gma_head;
+ s.ring_tail = gma_start + gma_tail;
+ s.rb_va = ce->lrc_reg_state;
+ s.workload = workload;
+ s.is_ctx_wa = false;
+ s.is_init_ctx = false;
+
+ /* don't scan the first RING_CTX_SIZE bytes (0x50 dwords), as they hold
+ * the ring context
+ */
+ ret = ip_gma_set(&s, gma_start + gma_head + RING_CTX_SIZE);
+ if (ret)
+ goto out;
+
+ ret = command_scan(&s, gma_head, gma_tail,
+ gma_start, ctx_size);
+out:
+ if (ret)
+ gvt_vgpu_err("scan shadow ctx error\n");
+
+ return ret;
+}
+
static int init_cmd_table(struct intel_gvt *gvt)
{
unsigned int gen_type = intel_gvt_get_device_type(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
index ab25d151932a..416d345e2816 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -40,6 +40,7 @@
struct intel_gvt;
struct intel_shadow_wa_ctx;
+struct intel_vgpu;
struct intel_vgpu_workload;
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
@@ -50,4 +51,8 @@ int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu);
+
+int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload);
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index a15f87539657..62a5b0dd2003 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -217,6 +217,15 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
DDI_BUF_CTL_ENABLE);
vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE;
}
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK);
+ /* No hpd_invert set in vgpu vbt, need to clear invert mask */
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK;
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK;
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1));
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
@@ -273,6 +282,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTA_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
}
@@ -301,6 +312,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTB_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
}
@@ -329,6 +342,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTC_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
}
@@ -661,44 +676,62 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
PORTD_HOTPLUG_STATUS_MASK;
intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
} else if (IS_BROXTON(i915)) {
- if (connected) {
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ if (connected) {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+ } else {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
+ ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
}
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
- vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
- SFUSE_STRAP_DDIB_DETECTED;
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~PORTA_HOTPLUG_STATUS_MASK;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTA_HOTPLUG_LONG_DETECT;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG);
+ }
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ if (connected) {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
- }
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
- SFUSE_STRAP_DDIC_DETECTED;
- vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
- GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
- }
- } else {
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ SFUSE_STRAP_DDIB_DETECTED;
+ } else {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
- ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
- }
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
~SFUSE_STRAP_DDIB_DETECTED;
- vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
- ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
}
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
- vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
- ~SFUSE_STRAP_DDIC_DETECTED;
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~PORTB_HOTPLUG_STATUS_MASK;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTB_HOTPLUG_LONG_DETECT;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
+ }
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+ if (connected) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+ SFUSE_STRAP_DDIC_DETECTED;
+ } else {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+ ~SFUSE_STRAP_DDIC_DETECTED;
}
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+ GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+ ~PORTC_HOTPLUG_STATUS_MASK;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTC_HOTPLUG_LONG_DETECT;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG);
}
- vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
- PORTB_HOTPLUG_STATUS_MASK;
- intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index d62cd14605a3..84ad74b37d66 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -182,7 +182,4 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
const struct intel_engine_cs *engine);
-void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
- intel_engine_mask_t engine_mask);
-
#endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index 67b6ede9e707..0daa3931aef7 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -38,6 +38,10 @@
#include <linux/types.h>
+#include "display/intel_display.h"
+
+struct intel_vgpu;
+
#define _PLANE_CTL_FORMAT_SHIFT 24
#define _PLANE_CTL_TILED_SHIFT 10
#define _PIPE_V_SRCSZ_SHIFT 0
@@ -98,8 +102,6 @@ enum DDI_PORT {
DDI_PORT_E = 4
};
-struct intel_gvt;
-
/* color space conversion and gamma correction are not included */
struct intel_vgpu_primary_plane_format {
u8 enabled; /* plane is enabled */
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index b0e173f2d990..3bf45672ef98 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -34,10 +34,19 @@
#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_
-#define I915_GTT_PAGE_SHIFT 12
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+
+#include "gt/intel_gtt.h"
+struct intel_gvt;
+struct intel_vgpu;
struct intel_vgpu_mm;
+#define I915_GTT_PAGE_SHIFT 12
+
#define INTEL_GVT_INVALID_ADDR (~0UL)
struct intel_gvt_gtt_entry {
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index cf3578e3f4dd..03c993d68f10 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -33,6 +33,10 @@
#ifndef _GVT_H_
#define _GVT_H_
+#include <uapi/linux/pci_regs.h>
+
+#include "i915_drv.h"
+
#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
@@ -244,7 +248,7 @@ struct gvt_mmio_block {
#define INTEL_GVT_MMIO_HASH_BITS 11
struct intel_gvt_mmio {
- u8 *mmio_attribute;
+ u16 *mmio_attribute;
/* Register contains RO bits */
#define F_RO (1 << 0)
/* Register contains graphics address */
@@ -263,6 +267,8 @@ struct intel_gvt_mmio {
* logical context image
*/
#define F_SR_IN_CTX (1 << 7)
+/* The value of a command write to this reg needs to be patched */
+#define F_CMD_WRITE_PATCH (1 << 8)
struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
@@ -329,6 +335,7 @@ struct intel_gvt {
u32 *mocs_mmio_offset_list;
u32 mocs_mmio_offset_list_cnt;
} engine_mmio_list;
+ bool is_reg_whitelist_updated;
struct dentry *debugfs_root;
};
@@ -412,6 +419,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
+/* ring context size, i.e. the first 0x50 dwords */
+#define RING_CTX_SIZE 320
+
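+
For reference, 0x50 dwords is 80 dwords, and 80 * 4 bytes gives the 320 used for RING_CTX_SIZE above, so the constant is a byte count even though the comment counts in dwords.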
struct intel_vgpu_creation_params {
__u64 handle;
__u64 low_gm_sz; /* in MB */
@@ -683,6 +693,35 @@ static inline void intel_gvt_mmio_set_sr_in_ctx(
}
void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
+/**
+ * intel_gvt_mmio_set_cmd_write_patch - mark an MMIO whose cmd write needs
+ * to be patched
+ * @gvt: a GVT device
+ * @offset: register offset
+ */
+static inline void intel_gvt_mmio_set_cmd_write_patch(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH;
+}
+
+/**
+ * intel_gvt_mmio_is_cmd_write_patch - check if an mmio's cmd access needs to
+ * be patched
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if a GPU command write to an MMIO should be patched
+ */
+static inline bool intel_gvt_mmio_is_cmd_write_patch(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
+}
+
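+
The attribute array indexed by offset >> 2 keeps one entry per dword-aligned register; F_CMD_WRITE_PATCH is bit 8, which is why mmio_attribute was widened from u8 to u16 earlier in this patch. A self-contained sketch of the same bitmask bookkeeping (the window size and helper names are made up):

#include <stdint.h>
#include <stdio.h>

#define F_CMD_WRITE_PATCH	(1 << 8)	/* ninth bit: no longer fits in a u8 */
#define MMIO_WINDOW		0x10000		/* made-up MMIO window size */

static uint16_t mmio_attribute[MMIO_WINDOW >> 2];	/* one entry per dword register */

static void set_cmd_write_patch(unsigned int offset)
{
	mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH;
}

static int is_cmd_write_patch(unsigned int offset)
{
	return !!(mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH);
}

int main(void)
{
	set_cmd_write_patch(0x24d0);
	printf("%d %d\n", is_cmd_write_patch(0x24d0), is_cmd_write_patch(0x2000));	/* 1 0 */
	return 0;
}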
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index aa7e75cb3e6a..6eeaeecb7f85 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -83,7 +83,7 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
}
-static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
unsigned int offset)
{
struct intel_gvt_mmio_info *e;
@@ -96,7 +96,7 @@ static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
}
static int new_mmio_info(struct intel_gvt *gvt,
- u32 offset, u8 flags, u32 size,
+ u32 offset, u16 flags, u32 size,
u32 addr_mask, u32 ro_mask, u32 device,
gvt_mmio_func read, gvt_mmio_func write)
{
@@ -118,7 +118,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
return -ENOMEM;
info->offset = i;
- p = find_mmio_info(gvt, info->offset);
+ p = intel_gvt_find_mmio_info(gvt, info->offset);
if (p) {
WARN(1, "dup mmio definition offset %x\n",
info->offset);
@@ -1651,7 +1651,7 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
return 0;
}
-/**
+/*
* FixMe:
* If guest fills non-priv batch buffer on ApolloLake/Broxton as Mesa i965 did:
* 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.)
@@ -1965,7 +1965,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
/* RING MODE */
#define RING_REG(base) _MMIO((base) + 0x29c)
- MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+ MMIO_RING_DFH(RING_REG, D_ALL,
+ F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL,
ring_mode_mmio_write);
#undef RING_REG
@@ -2885,8 +2886,8 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(_MMIO(0xb110), D_BDW);
- MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
- NULL, force_nonpriv_write);
+ MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
+ D_BDW_PLUS, NULL, force_nonpriv_write);
MMIO_D(_MMIO(0x44484), D_BDW_PLUS);
MMIO_D(_MMIO(0x4448c), D_BDW_PLUS);
@@ -3626,7 +3627,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
/*
* Normal tracked MMIOs.
*/
- mmio_info = find_mmio_info(gvt, offset);
+ mmio_info = intel_gvt_find_mmio_info(gvt, offset);
if (!mmio_info) {
gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
goto default_rw;
@@ -3686,14 +3687,13 @@ void intel_gvt_restore_fence(struct intel_gvt *gvt)
}
}
-static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
- u32 offset, void *data)
+static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
struct intel_vgpu *vgpu = data;
struct drm_i915_private *dev_priv = gvt->gt->i915;
if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
- I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+ intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
index fcd663811d37..287cd142629e 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.h
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -32,7 +32,10 @@
#ifndef _GVT_INTERRUPT_H_
#define _GVT_INTERRUPT_H_
-#include <linux/types.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+
+#include "i915_reg.h"
enum intel_gvt_event_type {
RCS_MI_USER_INTERRUPT = 0,
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 9e862dc73579..7c26af39fbfc 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -80,6 +80,9 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
void *data);
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
+ unsigned int offset);
+
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index afe574d6b3b5..c9589e26af93 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -35,6 +35,7 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "gvt.h"
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 3b25e7fe32f6..b6b69777af49 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -36,6 +36,18 @@
#ifndef __GVT_RENDER_H__
#define __GVT_RENDER_H__
+#include <linux/types.h>
+
+#include "gt/intel_engine_types.h"
+#include "gt/intel_lrc_reg.h"
+#include "i915_reg.h"
+
+struct i915_request;
+struct intel_context;
+struct intel_engine_cs;
+struct intel_gvt;
+struct intel_vgpu;
+
struct engine_mmio {
enum intel_engine_id id;
i915_reg_t reg;
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 6f92cde71971..550a456e936f 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -33,6 +33,8 @@
#ifndef _GVT_MPT_H_
#define _GVT_MPT_H_
+#include "gvt.h"
+
/**
* DOC: Hypervisor Service APIs for GVT-g Core Logic
*
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index b58860dee970..244cc7320b54 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -133,4 +133,6 @@
#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
#define VF_GUARDBAND _MMIO(0x83a4)
+
+#define BCS_TILE_REGISTER_VAL_OFFSET (0x43*4)
#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index aed2ef6466a2..43f31c2eab14 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -37,6 +37,8 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_execlists_submission.h"
+#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
@@ -135,6 +137,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
int i;
bool skip = false;
int ring_id = workload->engine->id;
+ int ret;
GEM_BUG_ON(!intel_context_is_pinned(ctx));
@@ -161,16 +164,24 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
- }
+ } else if (workload->engine->id == BCS0)
+ intel_gvt_hypervisor_read_gpa(vgpu,
+ workload->ring_context_gpa +
+ BCS_TILE_REGISTER_VAL_OFFSET,
+ (void *)shadow_ring_context +
+ BCS_TILE_REGISTER_VAL_OFFSET, 4);
#undef COPY_REG
#undef COPY_REG_MASKED
+ /* don't copy Ring Context (the first 0x50 dwords),
+ * only copy the Engine Context part from guest
+ */
intel_gvt_hypervisor_read_gpa(vgpu,
workload->ring_context_gpa +
- sizeof(*shadow_ring_context),
+ RING_CTX_SIZE,
(void *)shadow_ring_context +
- sizeof(*shadow_ring_context),
- I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+ RING_CTX_SIZE,
+ I915_GTT_PAGE_SIZE - RING_CTX_SIZE);
sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
@@ -237,6 +248,11 @@ read:
gpa_size = I915_GTT_PAGE_SIZE;
dst = context_base + (i << I915_GTT_PAGE_SHIFT);
}
+ ret = intel_gvt_scan_engine_context(workload);
+ if (ret) {
+ gvt_vgpu_err("invalid cmd found in guest context pages\n");
+ return ret;
+ }
s->last_ctx[ring_id].valid = true;
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 64e7a0b791c3..7c86984a842f 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -36,6 +36,11 @@
#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_
+#include "gt/intel_engine_types.h"
+
+#include "execlist.h"
+#include "interrupt.h"
+
struct intel_gvt_workload_scheduler {
struct intel_vgpu *current_vgpu;
struct intel_vgpu *next_vgpu;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index e49944fde333..6a16d0ca7cda 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -437,10 +437,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
- /* FixMe: Re-enable APL/BXT once vfio_edid enabled */
- else if (!IS_BROXTON(dev_priv))
+ else
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;
@@ -500,9 +499,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
mutex_lock(&gvt->lock);
vgpu = __intel_gvt_create_vgpu(gvt, &param);
- if (!IS_ERR(vgpu))
+ if (!IS_ERR(vgpu)) {
/* calculate left instance change for types */
intel_gvt_update_vgpu_types(gvt);
+ intel_gvt_update_reg_whitelist(vgpu);
+ }
mutex_unlock(&gvt->lock);
return vgpu;
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 10a865f3dc09..ab4382841c6b 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -159,8 +159,7 @@ __active_retire(struct i915_active *ref)
GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
/* Make the cached node available for reuse with any timeline */
- if (IS_ENABLED(CONFIG_64BIT))
- ref->cache->timeline = 0; /* needs cmpxchg(u64) */
+ ref->cache->timeline = 0; /* needs cmpxchg(u64) */
}
spin_unlock_irqrestore(&ref->tree_lock, flags);
@@ -256,7 +255,6 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
if (cached == idx)
return it;
-#ifdef CONFIG_64BIT /* for cmpxchg(u64) */
/*
* An unclaimed cache [.timeline=0] can only be claimed once.
*
@@ -267,9 +265,8 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
* only the winner of that race will cmpxchg return the old
* value of 0).
*/
- if (!cached && !cmpxchg(&it->timeline, 0, idx))
+ if (!cached && !cmpxchg64(&it->timeline, 0, idx))
return it;
-#endif
}
BUILD_BUG_ON(offsetof(typeof(*it), node));
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 93265951fdbb..ced9a96d7c34 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -26,6 +26,7 @@
*/
#include "gt/intel_engine.h"
+#include "gt/intel_gpu_commands.h"
#include "i915_drv.h"
#include "i915_memcpy.h"
@@ -1142,7 +1143,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
void *dst, *src;
int ret;
- dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
+ dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
if (IS_ERR(dst))
return dst;
@@ -1166,7 +1167,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
}
}
if (IS_ERR(src)) {
- unsigned long x, n;
+ unsigned long x, n, remain;
void *ptr;
/*
@@ -1177,14 +1178,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
* We don't care about copying too much here as we only
* validate up to the end of the batch.
*/
+ remain = length;
if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
- length = round_up(length,
+ remain = round_up(remain,
boot_cpu_data.x86_clflush_size);
ptr = dst;
x = offset_in_page(offset);
- for (n = offset >> PAGE_SHIFT; length; n++) {
- int len = min(length, PAGE_SIZE - x);
+ for (n = offset >> PAGE_SHIFT; remain; n++) {
+ int len = min(remain, PAGE_SIZE - x);
src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
if (needs_clflush)
@@ -1193,13 +1195,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
kunmap_atomic(src);
ptr += len;
- length -= len;
+ remain -= len;
x = 0;
}
}
i915_gem_object_unpin_pages(src_obj);
+ memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
+
/* dst_obj is returned with vmap pinned */
return dst;
}
@@ -1392,11 +1396,6 @@ static unsigned long *alloc_whitelist(u32 batch_length)
#define LENGTH_BIAS 2
-static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
-{
- return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
-}
-
/**
* intel_engine_cmd_parser() - parse a batch buffer for privilege violations
* @engine: the engine on which the batch is to execute
@@ -1538,16 +1537,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
ret = 0; /* allow execution */
}
}
-
- if (shadow_needs_clflush(shadow->obj))
- drm_clflush_virt_range(batch_end, 8);
}
- if (shadow_needs_clflush(shadow->obj)) {
- void *ptr = page_mask_bits(shadow->obj->mm.mapping);
-
- drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
- }
+ i915_gem_object_flush_map(shadow->obj);
if (!IS_ERR_OR_NULL(jump_whitelist))
kfree(jump_whitelist);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 77e76b665098..88336ff4bf09 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -45,6 +45,7 @@
#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_irq.h"
+#include "i915_scheduler.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
@@ -209,7 +210,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
spin_unlock(&obj->vma.lock);
seq_printf(m, " (pinned x %d)", pin_count);
- if (obj->stolen)
+ if (i915_gem_object_is_stolen(obj))
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
if (i915_gem_object_is_framebuffer(obj))
seq_printf(m, " (fb)");
@@ -219,145 +220,6 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%s)", engine->name);
}
-struct file_stats {
- struct i915_address_space *vm;
- unsigned long count;
- u64 total;
- u64 active, inactive;
- u64 closed;
-};
-
-static int per_file_stats(int id, void *ptr, void *data)
-{
- struct drm_i915_gem_object *obj = ptr;
- struct file_stats *stats = data;
- struct i915_vma *vma;
-
- if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount))
- return 0;
-
- stats->count++;
- stats->total += obj->base.size;
-
- spin_lock(&obj->vma.lock);
- if (!stats->vm) {
- for_each_ggtt_vma(vma, obj) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- if (i915_vma_is_active(vma))
- stats->active += vma->node.size;
- else
- stats->inactive += vma->node.size;
-
- if (i915_vma_is_closed(vma))
- stats->closed += vma->node.size;
- }
- } else {
- struct rb_node *p = obj->vma.tree.rb_node;
-
- while (p) {
- long cmp;
-
- vma = rb_entry(p, typeof(*vma), obj_node);
- cmp = i915_vma_compare(vma, stats->vm, NULL);
- if (cmp == 0) {
- if (drm_mm_node_allocated(&vma->node)) {
- if (i915_vma_is_active(vma))
- stats->active += vma->node.size;
- else
- stats->inactive += vma->node.size;
-
- if (i915_vma_is_closed(vma))
- stats->closed += vma->node.size;
- }
- break;
- }
- if (cmp < 0)
- p = p->rb_right;
- else
- p = p->rb_left;
- }
- }
- spin_unlock(&obj->vma.lock);
-
- i915_gem_object_put(obj);
- return 0;
-}
-
-#define print_file_stats(m, name, stats) do { \
- if (stats.count) \
- seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
- name, \
- stats.count, \
- stats.total, \
- stats.active, \
- stats.inactive, \
- stats.closed); \
-} while (0)
-
-static void print_context_stats(struct seq_file *m,
- struct drm_i915_private *i915)
-{
- struct file_stats kstats = {};
- struct i915_gem_context *ctx, *cn;
-
- spin_lock(&i915->gem.contexts.lock);
- list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
-
- if (!kref_get_unless_zero(&ctx->ref))
- continue;
-
- spin_unlock(&i915->gem.contexts.lock);
-
- for_each_gem_engine(ce,
- i915_gem_context_lock_engines(ctx), it) {
- if (intel_context_pin_if_active(ce)) {
- rcu_read_lock();
- if (ce->state)
- per_file_stats(0,
- ce->state->obj, &kstats);
- per_file_stats(0, ce->ring->vma->obj, &kstats);
- rcu_read_unlock();
- intel_context_unpin(ce);
- }
- }
- i915_gem_context_unlock_engines(ctx);
-
- mutex_lock(&ctx->mutex);
- if (!IS_ERR_OR_NULL(ctx->file_priv)) {
- struct file_stats stats = {
- .vm = rcu_access_pointer(ctx->vm),
- };
- struct drm_file *file = ctx->file_priv->file;
- struct task_struct *task;
- char name[80];
-
- rcu_read_lock();
- idr_for_each(&file->object_idr, per_file_stats, &stats);
- rcu_read_unlock();
-
- rcu_read_lock();
- task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
- snprintf(name, sizeof(name), "%s",
- task ? task->comm : "<unknown>");
- rcu_read_unlock();
-
- print_file_stats(m, name, stats);
- }
- mutex_unlock(&ctx->mutex);
-
- spin_lock(&i915->gem.contexts.lock);
- list_safe_reset_next(ctx, cn, link);
- i915_gem_context_put(ctx);
- }
- spin_unlock(&i915->gem.contexts.lock);
-
- print_file_stats(m, "[k]contexts", kstats);
-}
-
static int i915_gem_object_info(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -371,311 +233,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
for_each_memory_region(mr, i915, id)
seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
mr->name, &mr->total, &mr->avail);
- seq_putc(m, '\n');
-
- print_context_stats(m, i915);
-
- return 0;
-}
-
-static void gen8_display_interrupt_info(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe) {
- enum intel_display_power_domain power_domain;
- intel_wakeref_t wakeref;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv,
- power_domain);
- if (!wakeref) {
- seq_printf(m, "Pipe %c power disabled\n",
- pipe_name(pipe));
- continue;
- }
- seq_printf(m, "Pipe %c IMR:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IMR(pipe)));
- seq_printf(m, "Pipe %c IIR:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IIR(pipe)));
- seq_printf(m, "Pipe %c IER:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IER(pipe)));
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
- }
-
- seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IMR));
- seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IIR));
- seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IER));
-
- seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IMR));
- seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IIR));
- seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IER));
-
- seq_printf(m, "PCU interrupt mask:\t%08x\n",
- I915_READ(GEN8_PCU_IMR));
- seq_printf(m, "PCU interrupt identity:\t%08x\n",
- I915_READ(GEN8_PCU_IIR));
- seq_printf(m, "PCU interrupt enable:\t%08x\n",
- I915_READ(GEN8_PCU_IER));
-}
-
-static int i915_interrupt_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
- int i, pipe;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- if (IS_CHERRYVIEW(dev_priv)) {
- intel_wakeref_t pref;
-
- seq_printf(m, "Master Interrupt Control:\t%08x\n",
- I915_READ(GEN8_MASTER_IRQ));
-
- seq_printf(m, "Display IER:\t%08x\n",
- I915_READ(VLV_IER));
- seq_printf(m, "Display IIR:\t%08x\n",
- I915_READ(VLV_IIR));
- seq_printf(m, "Display IIR_RW:\t%08x\n",
- I915_READ(VLV_IIR_RW));
- seq_printf(m, "Display IMR:\t%08x\n",
- I915_READ(VLV_IMR));
- for_each_pipe(dev_priv, pipe) {
- enum intel_display_power_domain power_domain;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- pref = intel_display_power_get_if_enabled(dev_priv,
- power_domain);
- if (!pref) {
- seq_printf(m, "Pipe %c power disabled\n",
- pipe_name(pipe));
- continue;
- }
-
- seq_printf(m, "Pipe %c stat:\t%08x\n",
- pipe_name(pipe),
- I915_READ(PIPESTAT(pipe)));
-
- intel_display_power_put(dev_priv, power_domain, pref);
- }
-
- pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- seq_printf(m, "Port hotplug:\t%08x\n",
- I915_READ(PORT_HOTPLUG_EN));
- seq_printf(m, "DPFLIPSTAT:\t%08x\n",
- I915_READ(VLV_DPFLIPSTAT));
- seq_printf(m, "DPINVGTT:\t%08x\n",
- I915_READ(DPINVGTT));
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
-
- for (i = 0; i < 4; i++) {
- seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IMR(i)));
- seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IIR(i)));
- seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IER(i)));
- }
-
- seq_printf(m, "PCU interrupt mask:\t%08x\n",
- I915_READ(GEN8_PCU_IMR));
- seq_printf(m, "PCU interrupt identity:\t%08x\n",
- I915_READ(GEN8_PCU_IIR));
- seq_printf(m, "PCU interrupt enable:\t%08x\n",
- I915_READ(GEN8_PCU_IER));
- } else if (INTEL_GEN(dev_priv) >= 11) {
- if (HAS_MASTER_UNIT_IRQ(dev_priv))
- seq_printf(m, "Master Unit Interrupt Control: %08x\n",
- I915_READ(DG1_MSTR_UNIT_INTR));
-
- seq_printf(m, "Master Interrupt Control: %08x\n",
- I915_READ(GEN11_GFX_MSTR_IRQ));
-
- seq_printf(m, "Render/Copy Intr Enable: %08x\n",
- I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
- seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
- I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
- seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
- I915_READ(GEN11_GUC_SG_INTR_ENABLE));
- seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
- I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
- seq_printf(m, "Crypto Intr Enable:\t %08x\n",
- I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
- seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
- I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
-
- seq_printf(m, "Display Interrupt Control:\t%08x\n",
- I915_READ(GEN11_DISPLAY_INT_CTL));
-
- gen8_display_interrupt_info(m);
- } else if (INTEL_GEN(dev_priv) >= 8) {
- seq_printf(m, "Master Interrupt Control:\t%08x\n",
- I915_READ(GEN8_MASTER_IRQ));
-
- for (i = 0; i < 4; i++) {
- seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IMR(i)));
- seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IIR(i)));
- seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IER(i)));
- }
-
- gen8_display_interrupt_info(m);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- intel_wakeref_t pref;
-
- seq_printf(m, "Display IER:\t%08x\n",
- I915_READ(VLV_IER));
- seq_printf(m, "Display IIR:\t%08x\n",
- I915_READ(VLV_IIR));
- seq_printf(m, "Display IIR_RW:\t%08x\n",
- I915_READ(VLV_IIR_RW));
- seq_printf(m, "Display IMR:\t%08x\n",
- I915_READ(VLV_IMR));
- for_each_pipe(dev_priv, pipe) {
- enum intel_display_power_domain power_domain;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- pref = intel_display_power_get_if_enabled(dev_priv,
- power_domain);
- if (!pref) {
- seq_printf(m, "Pipe %c power disabled\n",
- pipe_name(pipe));
- continue;
- }
-
- seq_printf(m, "Pipe %c stat:\t%08x\n",
- pipe_name(pipe),
- I915_READ(PIPESTAT(pipe)));
- intel_display_power_put(dev_priv, power_domain, pref);
- }
-
- seq_printf(m, "Master IER:\t%08x\n",
- I915_READ(VLV_MASTER_IER));
-
- seq_printf(m, "Render IER:\t%08x\n",
- I915_READ(GTIER));
- seq_printf(m, "Render IIR:\t%08x\n",
- I915_READ(GTIIR));
- seq_printf(m, "Render IMR:\t%08x\n",
- I915_READ(GTIMR));
-
- seq_printf(m, "PM IER:\t\t%08x\n",
- I915_READ(GEN6_PMIER));
- seq_printf(m, "PM IIR:\t\t%08x\n",
- I915_READ(GEN6_PMIIR));
- seq_printf(m, "PM IMR:\t\t%08x\n",
- I915_READ(GEN6_PMIMR));
-
- pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- seq_printf(m, "Port hotplug:\t%08x\n",
- I915_READ(PORT_HOTPLUG_EN));
- seq_printf(m, "DPFLIPSTAT:\t%08x\n",
- I915_READ(VLV_DPFLIPSTAT));
- seq_printf(m, "DPINVGTT:\t%08x\n",
- I915_READ(DPINVGTT));
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
-
- } else if (!HAS_PCH_SPLIT(dev_priv)) {
- seq_printf(m, "Interrupt enable: %08x\n",
- I915_READ(GEN2_IER));
- seq_printf(m, "Interrupt identity: %08x\n",
- I915_READ(GEN2_IIR));
- seq_printf(m, "Interrupt mask: %08x\n",
- I915_READ(GEN2_IMR));
- for_each_pipe(dev_priv, pipe)
- seq_printf(m, "Pipe %c stat: %08x\n",
- pipe_name(pipe),
- I915_READ(PIPESTAT(pipe)));
- } else {
- seq_printf(m, "North Display Interrupt enable: %08x\n",
- I915_READ(DEIER));
- seq_printf(m, "North Display Interrupt identity: %08x\n",
- I915_READ(DEIIR));
- seq_printf(m, "North Display Interrupt mask: %08x\n",
- I915_READ(DEIMR));
- seq_printf(m, "South Display Interrupt enable: %08x\n",
- I915_READ(SDEIER));
- seq_printf(m, "South Display Interrupt identity: %08x\n",
- I915_READ(SDEIIR));
- seq_printf(m, "South Display Interrupt mask: %08x\n",
- I915_READ(SDEIMR));
- seq_printf(m, "Graphics Interrupt enable: %08x\n",
- I915_READ(GTIER));
- seq_printf(m, "Graphics Interrupt identity: %08x\n",
- I915_READ(GTIIR));
- seq_printf(m, "Graphics Interrupt mask: %08x\n",
- I915_READ(GTIMR));
- }
-
- if (INTEL_GEN(dev_priv) >= 11) {
- seq_printf(m, "RCS Intr Mask:\t %08x\n",
- I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
- seq_printf(m, "BCS Intr Mask:\t %08x\n",
- I915_READ(GEN11_BCS_RSVD_INTR_MASK));
- seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
- I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
- seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
- I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
- seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
- I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
- seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
- I915_READ(GEN11_GUC_SG_INTR_MASK));
- seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
- I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
- seq_printf(m, "Crypto Intr Mask:\t %08x\n",
- I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
- seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
- I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
-
- } else if (INTEL_GEN(dev_priv) >= 6) {
- for_each_uabi_engine(engine, dev_priv) {
- seq_printf(m,
- "Graphics Interrupt mask (%s): %08x\n",
- engine->name, ENGINE_READ(engine, RING_IMR));
- }
- }
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- unsigned int i;
-
- seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
-
- rcu_read_lock();
- for (i = 0; i < i915->ggtt.num_fences; i++) {
- struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
- struct i915_vma *vma = reg->vma;
-
- seq_printf(m, "Fence %d, pin count = %d, object = ",
- i, atomic_read(&reg->pin_count));
- if (!vma)
- seq_puts(m, "unused");
- else
- i915_debugfs_describe_obj(m, vma->obj);
- seq_putc(m, '\n');
- }
- rcu_read_unlock();
return 0;
}
@@ -802,7 +359,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 rpmodectl, freq_sts;
- rpmodectl = I915_READ(GEN6_RP_CONTROL);
+ rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
seq_printf(m, "HW control enabled: %s\n",
@@ -847,19 +404,19 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
int max_freq;
- rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+ rp_state_limits = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_LIMITS);
if (IS_GEN9_LP(dev_priv)) {
- rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
- gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
+ rp_state_cap = intel_uncore_read(&dev_priv->uncore, BXT_RP_STATE_CAP);
+ gt_perf_status = intel_uncore_read(&dev_priv->uncore, BXT_GT_PERF_STATUS);
} else {
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ rp_state_cap = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_CAP);
+ gt_perf_status = intel_uncore_read(&dev_priv->uncore, GEN6_GT_PERF_STATUS);
}
/* RPSTAT1 is in the GT power well */
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- reqf = I915_READ(GEN6_RPNSWREQ);
+ reqf = intel_uncore_read(&dev_priv->uncore, GEN6_RPNSWREQ);
if (INTEL_GEN(dev_priv) >= 9)
reqf >>= 23;
else {
@@ -871,24 +428,24 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
}
reqf = intel_gpu_freq(rps, reqf);
- rpmodectl = I915_READ(GEN6_RP_CONTROL);
- rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
- rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
-
- rpstat = I915_READ(GEN6_RPSTAT1);
- rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
- rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
- rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
- rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
- rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
- rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
+ rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
+ rpinclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_UP_THRESHOLD);
+ rpdeclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_DOWN_THRESHOLD);
+
+ rpstat = intel_uncore_read(&dev_priv->uncore, GEN6_RPSTAT1);
+ rpupei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+ rpcurup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
+ rpprevup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
+ rpdownei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+ rpcurdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
+ rpprevdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
cagf = intel_rps_read_actual_frequency(rps);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
if (INTEL_GEN(dev_priv) >= 11) {
- pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
- pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
+ pm_ier = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ pm_imr = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
/*
* The equivalent to the PM ISR & IIR cannot be read
* without affecting the current state of the system
@@ -896,17 +453,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
pm_isr = 0;
pm_iir = 0;
} else if (INTEL_GEN(dev_priv) >= 8) {
- pm_ier = I915_READ(GEN8_GT_IER(2));
- pm_imr = I915_READ(GEN8_GT_IMR(2));
- pm_isr = I915_READ(GEN8_GT_ISR(2));
- pm_iir = I915_READ(GEN8_GT_IIR(2));
+ pm_ier = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IER(2));
+ pm_imr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IMR(2));
+ pm_isr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_ISR(2));
+ pm_iir = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IIR(2));
} else {
- pm_ier = I915_READ(GEN6_PMIER);
- pm_imr = I915_READ(GEN6_PMIMR);
- pm_isr = I915_READ(GEN6_PMISR);
- pm_iir = I915_READ(GEN6_PMIIR);
+ pm_ier = intel_uncore_read(&dev_priv->uncore, GEN6_PMIER);
+ pm_imr = intel_uncore_read(&dev_priv->uncore, GEN6_PMIMR);
+ pm_isr = intel_uncore_read(&dev_priv->uncore, GEN6_PMISR);
+ pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
}
- pm_mask = I915_READ(GEN6_PMINTRMSK);
+ pm_mask = intel_uncore_read(&dev_priv->uncore, GEN6_PMINTRMSK);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
@@ -936,27 +493,27 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
rpupei,
intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
- seq_printf(m, "RP CUR UP: %d (%dun)\n",
+ seq_printf(m, "RP CUR UP: %d (%lldun)\n",
rpcurup,
intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%dns)\n",
+ seq_printf(m, "RP PREV UP: %d (%lldns)\n",
rpprevup,
intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
rps->power.up_threshold);
- seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
rpdownei,
intel_gt_pm_interval_to_ns(&dev_priv->gt,
rpdownei));
- seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
rpcurdown,
intel_gt_pm_interval_to_ns(&dev_priv->gt,
rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+ seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
rpprevdown,
intel_gt_pm_interval_to_ns(&dev_priv->gt,
rpprevdown));
@@ -1011,111 +568,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_ring_freq_table(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_rps *rps = &dev_priv->gt.rps;
- unsigned int max_gpu_freq, min_gpu_freq;
- intel_wakeref_t wakeref;
- int gpu_freq, ia_freq;
-
- if (!HAS_LLC(dev_priv))
- return -ENODEV;
-
- min_gpu_freq = rps->min_freq;
- max_gpu_freq = rps->max_freq;
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- /* Convert GT frequency to 50 HZ units */
- min_gpu_freq /= GEN9_FREQ_SCALER;
- max_gpu_freq /= GEN9_FREQ_SCALER;
- }
-
- seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
- ia_freq = gpu_freq;
- sandybridge_pcode_read(dev_priv,
- GEN6_PCODE_READ_MIN_FREQ_TABLE,
- &ia_freq, NULL);
- seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
- intel_gpu_freq(rps,
- (gpu_freq *
- (IS_GEN9_BC(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10 ?
- GEN9_FREQ_SCALER : 1))),
- ((ia_freq >> 0) & 0xff) * 100,
- ((ia_freq >> 8) & 0xff) * 100);
- }
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
-{
- seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
- ring->space, ring->head, ring->tail, ring->emit);
-}
-
-static int i915_context_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct i915_gem_context *ctx, *cn;
-
- spin_lock(&i915->gem.contexts.lock);
- list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
-
- if (!kref_get_unless_zero(&ctx->ref))
- continue;
-
- spin_unlock(&i915->gem.contexts.lock);
-
- seq_puts(m, "HW context ");
- if (ctx->pid) {
- struct task_struct *task;
-
- task = get_pid_task(ctx->pid, PIDTYPE_PID);
- if (task) {
- seq_printf(m, "(%s [%d]) ",
- task->comm, task->pid);
- put_task_struct(task);
- }
- } else if (IS_ERR(ctx->file_priv)) {
- seq_puts(m, "(deleted) ");
- } else {
- seq_puts(m, "(kernel) ");
- }
-
- seq_putc(m, ctx->remap_slice ? 'R' : 'r');
- seq_putc(m, '\n');
-
- for_each_gem_engine(ce,
- i915_gem_context_lock_engines(ctx), it) {
- if (intel_context_pin_if_active(ce)) {
- seq_printf(m, "%s: ", ce->engine->name);
- if (ce->state)
- i915_debugfs_describe_obj(m, ce->state->obj);
- describe_ctx_ring(m, ce->ring);
- seq_putc(m, '\n');
- intel_context_unpin(ce);
- }
- }
- i915_gem_context_unlock_engines(ctx);
-
- seq_putc(m, '\n');
-
- spin_lock(&i915->gem.contexts.lock);
- list_safe_reset_next(ctx, cn, link);
- i915_gem_context_put(ctx);
- }
- spin_unlock(&i915->gem.contexts.lock);
-
- return 0;
-}
-
static const char *swizzle_string(unsigned swizzle)
{
switch (swizzle) {
@@ -1193,20 +645,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
return 0;
}
-static const char *rps_power_to_str(unsigned int power)
-{
- static const char * const strings[] = {
- [LOW_POWER] = "low power",
- [BETWEEN] = "mixed",
- [HIGH_POWER] = "high power",
- };
-
- if (power >= ARRAY_SIZE(strings) || !strings[power])
- return "unknown";
-
- return strings[power];
-}
-
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1231,42 +669,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_gpu_freq(rps, rps->efficient_freq),
intel_gpu_freq(rps, rps->boost_freq));
- seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
-
- if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) {
- u32 rpup, rpupei;
- u32 rpdown, rpdownei;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
- rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
- rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
- rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
- rps_power_to_str(rps->power.mode));
- seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
- rpup && rpupei ? 100 * rpup / rpupei : 0,
- rps->power.up_threshold);
- seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
- rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
- rps->power.down_threshold);
- } else {
- seq_puts(m, "\nRPS Autotuning inactive\n");
- }
-
- return 0;
-}
-
-static int i915_llc(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const bool edram = INTEL_GEN(dev_priv) > 8;
-
- seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
- seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
- dev_priv->edram_size_mb);
+ seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
return 0;
}
@@ -1280,7 +683,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_puts(m, "Runtime power management not supported\n");
seq_printf(m, "Runtime power status: %s\n",
- enableddisabled(!dev_priv->power_domains.wakeref));
+ enableddisabled(!dev_priv->power_domains.init_wakeref));
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
seq_printf(m, "IRQs disabled: %s\n",
@@ -1306,34 +709,28 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
static int i915_engine_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
struct drm_printer p;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- seq_printf(m, "GT awake? %s [%d]\n",
- yesno(dev_priv->gt.awake),
- atomic_read(&dev_priv->gt.wakeref.count));
- seq_printf(m, "CS timestamp frequency: %u Hz\n",
- RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_hz);
+ seq_printf(m, "GT awake? %s [%d], %llums\n",
+ yesno(i915->gt.awake),
+ atomic_read(&i915->gt.wakeref.count),
+ ktime_to_ms(intel_gt_get_awake_time(&i915->gt)));
+ seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
+ i915->gt.clock_frequency,
+ i915->gt.clock_period_ns);
p = drm_seq_file_printer(m);
- for_each_uabi_engine(engine, dev_priv)
+ for_each_uabi_engine(engine, i915)
intel_engine_dump(engine, &p, "%s\n", engine->name);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_gt_show_timelines(&i915->gt, &p, i915_request_show_with_schedule);
- return 0;
-}
-
-static int i915_shrinker_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
-
- seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
- seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return 0;
}
@@ -1411,7 +808,7 @@ i915_perf_noa_delay_set(void *data, u64 val)
* This would lead to infinite waits as we're doing timestamp
* difference on the CS with only 32bits.
*/
- if (i915_cs_timestamp_ns_to_ticks(i915, val) > U32_MAX)
+ if (intel_gt_ns_to_clock_interval(&i915->gt, val) > U32_MAX)
return -EINVAL;
atomic64_set(&i915->perf.noa_programming_delay, val);
@@ -1529,55 +926,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
i915_drop_caches_get, i915_drop_caches_set,
"0x%08llx\n");
-static int
-i915_cache_sharing_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
- intel_wakeref_t wakeref;
- u32 snpcr = 0;
-
- if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
- return -ENODEV;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
-
- *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
-
- return 0;
-}
-
-static int
-i915_cache_sharing_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- intel_wakeref_t wakeref;
-
- if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
- return -ENODEV;
-
- if (val > 3)
- return -EINVAL;
-
- drm_dbg(&dev_priv->drm,
- "Manually setting uncore sharing to %llu\n", val);
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- u32 snpcr;
-
- /* Update the cache sharing policy here as well */
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
- snpcr &= ~GEN6_MBC_SNPCR_MASK;
- snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
- I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
- }
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
- i915_cache_sharing_get, i915_cache_sharing_set,
- "%llu\n");
-
static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -1621,16 +969,10 @@ static const struct file_operations i915_forcewake_fops = {
static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
- {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
- {"i915_gem_interrupt", i915_interrupt_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_ring_freq_table", i915_ring_freq_table, 0},
- {"i915_context_status", i915_context_status, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
- {"i915_llc", i915_llc, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
{"i915_engine_info", i915_engine_info, 0},
- {"i915_shrinker_info", i915_shrinker_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
{"i915_sseu_status", i915_sseu_status, 0},
{"i915_rps_boost_info", i915_rps_boost_info, 0},
@@ -1643,7 +985,6 @@ static const struct i915_debugfs_files {
} i915_debugfs_files[] = {
{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
{"i915_wedged", &i915_wedged_fops},
- {"i915_cache_sharing", &i915_cache_sharing_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
{"i915_error_state", &i915_error_state_fops},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 320856b665a1..2febda554bca 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,6 +58,7 @@
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
+#include "display/intel_pps.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"
@@ -410,6 +411,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev_priv);
+ intel_device_info_runtime_init(dev_priv);
ret = intel_gt_init_mmio(&dev_priv->gt);
if (ret)
@@ -516,8 +518,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
- intel_device_info_runtime_init(dev_priv);
-
if (HAS_PPGTT(dev_priv)) {
if (intel_vgpu_active(dev_priv) &&
!intel_vgpu_has_full_ppgtt(dev_priv)) {
@@ -578,8 +578,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
- cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
-
intel_gt_init_workarounds(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -611,14 +609,15 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
goto err_msi;
intel_opregion_setup(dev_priv);
+
+ intel_pcode_init(dev_priv);
+
/*
- * Fill the dram structure to get the system raw bandwidth and
- * dram info. This will be used for memory latency calculation.
+ * Fill the dram structure to get the system dram info. This will be
+ * used for memory latency calculation.
*/
intel_dram_detect(dev_priv);
- intel_pcode_init(dev_priv);
-
intel_bw_init_hw(dev_priv);
return 0;
@@ -626,7 +625,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
- cpu_latency_qos_remove_request(&dev_priv->pm_qos);
err_mem_regions:
intel_memory_regions_driver_release(dev_priv);
err_ggtt:
@@ -648,8 +646,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
if (pdev->msi_enabled)
pci_disable_msi(pdev);
-
- cpu_latency_qos_remove_request(&dev_priv->pm_qos);
}
/**
@@ -738,6 +734,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
* events.
*/
drm_kms_helper_poll_fini(&dev_priv->drm);
+ drm_atomic_helper_shutdown(&dev_priv->drm);
intel_gt_driver_unregister(&dev_priv->gt);
acpi_video_unregister();
@@ -940,8 +937,6 @@ void i915_driver_remove(struct drm_i915_private *i915)
i915_gem_suspend(i915);
- drm_atomic_helper_shutdown(&i915->drm);
-
intel_gvt_driver_remove(i915);
intel_modeset_driver_remove(i915);
@@ -1052,6 +1047,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
void i915_driver_shutdown(struct drm_i915_private *i915)
{
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
+
i915_gem_suspend(i915);
drm_kms_helper_poll_disable(&i915->drm);
@@ -1065,6 +1062,8 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
+
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a3ee4f9dc0a..574f3575879d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -79,9 +79,9 @@
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
-#include "gt/intel_lrc.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
+#include "gt/intel_region_lmem.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"
@@ -103,7 +103,6 @@
#include "i915_vma.h"
#include "i915_irq.h"
-#include "intel_region_lmem.h"
/* General customization:
*/
@@ -416,6 +415,7 @@ struct intel_fbc {
u16 gen9_wa_cfb_stride;
u16 interval;
s8 fence_id;
+ bool psr2_active;
} state_cache;
/*
@@ -891,9 +891,6 @@ struct drm_i915_private {
bool display_irqs_enabled;
- /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
- struct pm_qos_request pm_qos;
-
/* Sideband mailbox protection */
struct mutex sb_lock;
struct pm_qos_request sb_qos;
@@ -1125,23 +1122,11 @@ struct drm_i915_private {
* crtc_state->wm.need_postvbl_update.
*/
struct mutex wm_mutex;
-
- /*
- * Set during HW readout of watermarks/DDB. Some platforms
- * need to know when we're still using BIOS-provided values
- * (which we don't fully trust).
- *
- * FIXME get rid of this.
- */
- bool distrust_bios_wm;
} wm;
struct dram_info {
- bool valid;
- bool is_16gb_dimm;
+ bool wm_lv_0_adjust_needed;
u8 num_channels;
- u8 ranks;
- u32 bandwidth_kbps;
bool symmetric_memory;
enum intel_dram_type {
INTEL_DRAM_UNKNOWN,
@@ -1150,6 +1135,7 @@ struct drm_i915_private {
INTEL_DRAM_LPDDR3,
INTEL_DRAM_LPDDR4
} type;
+ u8 num_qgv_points;
} dram_info;
struct intel_bw_info {
@@ -1172,9 +1158,6 @@ struct drm_i915_private {
struct i915_gem_contexts {
spinlock_t lock; /* locks list */
struct list_head list;
-
- struct llist_head free_list;
- struct work_struct free_work;
} contexts;
/*
@@ -1188,6 +1171,8 @@ struct drm_i915_private {
struct file *mmap_singleton;
} gem;
+ u8 framestart_delay;
+
u8 pch_ssc_use;
/* For i915gm/i945gm vblank irq workaround */
@@ -1572,16 +1557,30 @@ enum {
TGL_REVID_D0,
};
-extern const struct i915_rev_steppings tgl_uy_revids[];
-extern const struct i915_rev_steppings tgl_revids[];
+#define TGL_UY_REVIDS_SIZE 4
+#define TGL_REVIDS_SIZE 2
+
+extern const struct i915_rev_steppings tgl_uy_revids[TGL_UY_REVIDS_SIZE];
+extern const struct i915_rev_steppings tgl_revids[TGL_REVIDS_SIZE];
static inline const struct i915_rev_steppings *
tgl_revids_get(struct drm_i915_private *dev_priv)
{
- if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv))
- return &tgl_uy_revids[INTEL_REVID(dev_priv)];
- else
- return &tgl_revids[INTEL_REVID(dev_priv)];
+ u8 revid = INTEL_REVID(dev_priv);
+ u8 size;
+ const struct i915_rev_steppings *tgl_revid_tbl;
+
+ if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+ tgl_revid_tbl = tgl_uy_revids;
+ size = ARRAY_SIZE(tgl_uy_revids);
+ } else {
+ tgl_revid_tbl = tgl_revids;
+ size = ARRAY_SIZE(tgl_revids);
+ }
+
+ revid = min_t(u8, revid, size - 1);
+
+ return &tgl_revid_tbl[revid];
}
#define IS_TGL_DISP_REVID(p, since, until) \
@@ -1591,14 +1590,14 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define IS_TGL_UY_GT_REVID(p, since, until) \
((IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_uy_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
- tgl_uy_revids[INTEL_REVID(p)].gt_stepping <= (until))
+ tgl_revids_get(p)->gt_stepping >= (since) && \
+ tgl_revids_get(p)->gt_stepping <= (until))
#define IS_TGL_GT_REVID(p, since, until) \
(IS_TIGERLAKE(p) && \
!(IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
- tgl_revids[INTEL_REVID(p)].gt_stepping <= (until))
+ tgl_revids_get(p)->gt_stepping >= (since) && \
+ tgl_revids_get(p)->gt_stepping <= (until))
#define RKL_REVID_A0 0x0
#define RKL_REVID_B0 0x1
@@ -1649,8 +1648,6 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
(INTEL_INFO(dev_priv)->has_logical_ring_elsq)
-#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
- (INTEL_INFO(dev_priv)->has_logical_ring_preemption)
#define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)
@@ -1752,10 +1749,17 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
+#define HAS_VRR(i915) (INTEL_GEN(i915) >= 12)
+
/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
+static inline bool run_as_guest(void)
+{
+ return !hypervisor_is_type(X86_HYPER_NATIVE);
+}
+
static inline bool intel_vtd_active(void)
{
#ifdef CONFIG_INTEL_IOMMU
@@ -1764,7 +1768,7 @@ static inline bool intel_vtd_active(void)
#endif
/* Running as a guest, we assume the host is enforcing VT'd */
- return !hypervisor_is_type(X86_HYPER_NATIVE);
+ return run_as_guest();
}
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
@@ -1970,43 +1974,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-#define __I915_REG_OP(op__, dev_priv__, ...) \
- intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
-
-#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__))
-#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
-
-#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__))
-
-/* These are untraced mmio-accessors that are only valid to be used inside
- * critical sections, such as inside IRQ handlers, where forcewake is explicitly
- * controlled.
- *
- * Think twice, and think again, before using these.
- *
- * As an example, these accessors can possibly be used between:
- *
- * spin_lock_irq(&dev_priv->uncore.lock);
- * intel_uncore_forcewake_get__locked();
- *
- * and
- *
- * intel_uncore_forcewake_put__locked();
- * spin_unlock_irq(&dev_priv->uncore.lock);
- *
- *
- * Note: some registers may not need forcewake held, so
- * intel_uncore_forcewake_{get,put} can be omitted, see
- * intel_uncore_forcewake_for_reg().
- *
- * Certain architectures will die if the same cacheline is concurrently accessed
- * by different clients (e.g. on Ivybridge). Access to registers should
- * therefore generally be serialised, by either the dev_priv->uncore.lock or
- * a more localised lock guarding all access to that bank of registers.
- */
-#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
-#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
-
/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
@@ -2029,16 +1996,4 @@ i915_coherent_map_type(struct drm_i915_private *i915)
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
-static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val)
-{
- return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz,
- 1000000000);
-}
-
-static inline u64 i915_cs_timestamp_ticks_to_ns(struct drm_i915_private *i915, u64 val)
-{
- return div_u64(val * 1000000000,
- RUNTIME_INFO(i915)->cs_timestamp_frequency_hz);
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 58276694c848..9b04dff5eb32 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -180,108 +180,6 @@ try_again:
}
static int
-i915_gem_create(struct drm_file *file,
- struct intel_memory_region *mr,
- u64 *size_p,
- u32 *handle_p)
-{
- struct drm_i915_gem_object *obj;
- u32 handle;
- u64 size;
- int ret;
-
- GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
- size = round_up(*size_p, mr->min_page_size);
- if (size == 0)
- return -EINVAL;
-
- /* For most of the ABI (e.g. mmap) we think in system pages */
- GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
-
- /* Allocate the new object */
- obj = i915_gem_object_create_region(mr, size, 0);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- ret = drm_gem_handle_create(file, &obj->base, &handle);
- /* drop reference from allocate - handle holds it now */
- i915_gem_object_put(obj);
- if (ret)
- return ret;
-
- *handle_p = handle;
- *size_p = size;
- return 0;
-}
-
-int
-i915_gem_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- enum intel_memory_type mem_type;
- int cpp = DIV_ROUND_UP(args->bpp, 8);
- u32 format;
-
- switch (cpp) {
- case 1:
- format = DRM_FORMAT_C8;
- break;
- case 2:
- format = DRM_FORMAT_RGB565;
- break;
- case 4:
- format = DRM_FORMAT_XRGB8888;
- break;
- default:
- return -EINVAL;
- }
-
- /* have to work out size/pitch and return them */
- args->pitch = ALIGN(args->width * cpp, 64);
-
- /* align stride to page size so that we can remap */
- if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
- DRM_FORMAT_MOD_LINEAR))
- args->pitch = ALIGN(args->pitch, 4096);
-
- if (args->pitch < args->width)
- return -EINVAL;
-
- args->size = mul_u32_u32(args->pitch, args->height);
-
- mem_type = INTEL_MEMORY_SYSTEM;
- if (HAS_LMEM(to_i915(dev)))
- mem_type = INTEL_MEMORY_LOCAL;
-
- return i915_gem_create(file,
- intel_memory_region_by_type(to_i915(dev),
- mem_type),
- &args->size, &args->handle);
-}
-
-/**
- * Creates a new mm object and returns a handle to it.
- * @dev: drm device pointer
- * @data: ioctl data blob
- * @file: drm file pointer
- */
-int
-i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_private *i915 = to_i915(dev);
- struct drm_i915_gem_create *args = data;
-
- i915_gem_flush_free_objects(i915);
-
- return i915_gem_create(file,
- intel_memory_region_by_type(i915,
- INTEL_MEMORY_SYSTEM),
- &args->size, &args->handle);
-}
-
-static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
bool needs_clflush)
{
@@ -1059,14 +957,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
i915_gem_object_is_tiled(obj) &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
- GEM_BUG_ON(!obj->mm.quirked);
- __i915_gem_object_unpin_pages(obj);
- obj->mm.quirked = false;
+ GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_clear_tiling_quirk(obj);
+ i915_gem_object_make_shrinkable(obj);
}
if (args->madv == I915_MADV_WILLNEED) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_make_unshrinkable(obj);
+ i915_gem_object_set_tiling_quirk(obj);
}
}
@@ -1207,8 +1105,6 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
- i915_gem_driver_release__contexts(dev_priv);
-
intel_gt_driver_release(&dev_priv->gt);
intel_wa_list_free(&dev_priv->gt_wa_list);
@@ -1279,19 +1175,13 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
* the objects as well, see i915_gem_freeze()
*/
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- i915_gem_shrink(i915, -1UL, NULL, ~0);
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ i915_gem_shrink(i915, -1UL, NULL, ~0);
i915_gem_drain_freed_objects(i915);
- list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
- i915_gem_object_lock(obj, NULL);
- drm_WARN_ON(&i915->drm,
- i915_gem_object_set_to_cpu_domain(obj, true));
- i915_gem_object_unlock(obj);
- }
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ wbinvd_on_all_cpus();
+ list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)
+ __start_cpu_write(obj);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index a4cad3f154ca..e622aee6e4be 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -38,11 +38,18 @@ struct drm_i915_private;
#define GEM_SHOW_DEBUG() drm_debug_enabled(DRM_UT_DRIVER)
+#ifdef CONFIG_DRM_I915_DEBUG_GEM_ONCE
+#define __GEM_BUG(cond) BUG()
+#else
+#define __GEM_BUG(cond) \
+ WARN(1, "%s:%d GEM_BUG_ON(%s)\n", __func__, __LINE__, __stringify(cond))
+#endif
+
#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \
__func__, __LINE__, __stringify(condition)); \
GEM_TRACE_DUMP(); \
- BUG(); \
+ __GEM_BUG(condition); \
} \
} while(0)
#define GEM_WARN_ON(expr) WARN_ON(expr)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e1a66c8245b8..4d2d59a9942b 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -61,6 +61,17 @@ mark_free(struct drm_mm_scan *scan,
return drm_mm_scan_add_block(scan, &vma->node);
}
+static bool defer_evict(struct i915_vma *vma)
+{
+ if (i915_vma_is_active(vma))
+ return true;
+
+ if (i915_vma_is_scanout(vma))
+ return true;
+
+ return false;
+}
+
/**
* i915_gem_evict_something - Evict vmas to make room for binding a new one
* @vm: address space to evict from
@@ -150,7 +161,7 @@ search_again:
* To notice when we complete one full cycle, we record the
* first active element seen, before moving it to the tail.
*/
- if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) {
+ if (active != ERR_PTR(-EAGAIN) && defer_evict(vma)) {
if (!active)
active = vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c5ee1567f3d1..3ee2f682eff6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -55,22 +55,17 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct device *kdev = &dev_priv->drm.pdev->dev;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
- if (unlikely(ggtt->do_idle_maps)) {
- /* XXX This does not prevent more requests being submitted! */
- if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
- -MAX_SCHEDULE_TIMEOUT)) {
- drm_err(&dev_priv->drm,
- "Failed to wait for idle; VT'd may hang.\n");
- /* Wait a bit, in hopes it avoids the hang */
- udelay(10);
- }
- }
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ /* XXX This does not prevent more requests being submitted! */
+ if (unlikely(ggtt->do_idle_maps))
+ /* Wait a bit, in the hope it avoids the hang */
+ usleep_range(100, 250);
- dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&i915->drm.pdev->dev,
+ pages->sgl, pages->nents,
+ PCI_DMA_BIDIRECTIONAL);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index f96032c60a12..75c3bfc2486e 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -154,7 +154,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
- value = RUNTIME_INFO(i915)->cs_timestamp_frequency_hz;
+ value = i915->gt.clock_frequency;
break;
case I915_PARAM_MMAP_GTT_COHERENT:
value = INTEL_INFO(i915)->has_coherent_ggtt;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d8cac4c5881f..f962693404b7 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -485,7 +485,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct i915_gem_context_coredump *ctx)
{
- const u32 period = RUNTIME_INFO(m->i915)->cs_timestamp_period_ns;
+ const u32 period = m->i915->gt.clock_period_ns;
err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
@@ -1051,7 +1051,9 @@ i915_vma_coredump_create(const struct intel_gt *gt,
for_each_sgt_daddr(dma, iter, vma->pages) {
void __iomem *s;
- s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
+ s = io_mapping_map_wc(&mem->iomap,
+ dma - mem->region.start,
+ PAGE_SIZE);
ret = compress_page(compress,
(void __force *)s, dst,
true);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6cdb052e3850..1a701367a718 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -327,10 +327,10 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
drm_WARN_ON(&dev_priv->drm, bits & ~mask);
- val = I915_READ(PORT_HOTPLUG_EN);
+ val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
val &= ~mask;
val |= bits;
- I915_WRITE(PORT_HOTPLUG_EN, val);
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}
/**
@@ -376,8 +376,8 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->irq_mask &&
!drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
dev_priv->irq_mask = new_val;
- I915_WRITE(DEIMR, dev_priv->irq_mask);
- POSTING_READ(DEIMR);
+ intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
+ intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
}
}
@@ -401,15 +401,15 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
- old_val = I915_READ(GEN8_DE_PORT_IMR);
+ old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
new_val = old_val;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != old_val) {
- I915_WRITE(GEN8_DE_PORT_IMR, new_val);
- POSTING_READ(GEN8_DE_PORT_IMR);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
}
}
@@ -440,8 +440,8 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->de_irq_mask[pipe]) {
dev_priv->de_irq_mask[pipe] = new_val;
- I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
- POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
}
}
@@ -455,7 +455,7 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- u32 sdeimr = I915_READ(SDEIMR);
+ u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
@@ -466,8 +466,8 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
- I915_WRITE(SDEIMR, sdeimr);
- POSTING_READ(SDEIMR);
+ intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
+ intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
@@ -533,8 +533,8 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
dev_priv->pipestat_irq_mask[pipe] |= status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
- I915_WRITE(reg, enable_mask | status_mask);
- POSTING_READ(reg);
+ intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
+ intel_uncore_posting_read(&dev_priv->uncore, reg);
}
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
@@ -556,8 +556,8 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
- I915_WRITE(reg, enable_mask | status_mask);
- POSTING_READ(reg);
+ intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
+ intel_uncore_posting_read(&dev_priv->uncore, reg);
}
static bool i915_has_asle(struct drm_i915_private *dev_priv)
@@ -715,28 +715,18 @@ u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
if (!vblank->max_vblank_count)
return 0;
- return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
+ return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
}
-/*
- * On certain encoders on certain platforms, pipe
- * scanline register will not work to get the scanline,
- * since the timings are driven from the PORT or issues
- * with scanline register updates.
- * This function will use Framestamp and current
- * timestamp registers to calculate the scanline.
- */
-static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
+static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_vblank_crtc *vblank =
&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
const struct drm_display_mode *mode = &vblank->hwmode;
- u32 vblank_start = mode->crtc_vblank_start;
- u32 vtotal = mode->crtc_vtotal;
u32 htotal = mode->crtc_htotal;
u32 clock = mode->crtc_clock;
- u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
+ u32 scan_prev_time, scan_curr_time, scan_post_time;
/*
* To avoid the race condition where we might cross into the
@@ -763,8 +753,28 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
PIPE_FRMTMSTMP(crtc->pipe));
} while (scan_post_time != scan_prev_time);
- scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
- clock), 1000 * htotal);
+ return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
+ clock), 1000 * htotal);
+}
+
+/*
+ * On certain encoders on certain platforms, pipe
+ * scanline register will not work to get the scanline,
+ * since the timings are driven from the PORT or issues
+ * with scanline register updates.
+ * This function will use Framestamp and current
+ * timestamp registers to calculate the scanline.
+ */
+static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
+{
+ struct drm_vblank_crtc *vblank =
+ &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+ const struct drm_display_mode *mode = &vblank->hwmode;
+ u32 vblank_start = mode->crtc_vblank_start;
+ u32 vtotal = mode->crtc_vtotal;
+ u32 scanline;
+
+ scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
scanline = min(scanline, vtotal - 1);
scanline = (scanline + vblank_start) % vtotal;
@@ -883,7 +893,20 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
if (stime)
*stime = ktime_get();
- if (use_scanline_counter) {
+ if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
+ int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
+
+ position = __intel_get_crtc_scanline(crtc);
+
+ /*
+ * Already exiting vblank? If so, shift our position
+ * so it looks like we're already approaching the full
+ * vblank end. This should make the generated timestamp
+ * more or less match when the active portion will start.
+ */
+ if (position >= vbl_start && scanlines < position)
+ position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
+ } else if (use_scanline_counter) {
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
@@ -1004,9 +1027,9 @@ static void ivb_parity_work(struct work_struct *work)
if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
goto out;
- misccpctl = I915_READ(GEN7_MISCCPCTL);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- POSTING_READ(GEN7_MISCCPCTL);
+ misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);
while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
i915_reg_t reg;
@@ -1020,13 +1043,13 @@ static void ivb_parity_work(struct work_struct *work)
reg = GEN7_L3CDERRST1(slice);
- error_status = I915_READ(reg);
+ error_status = intel_uncore_read(&dev_priv->uncore, reg);
row = GEN7_PARITY_ERROR_ROW(error_status);
bank = GEN7_PARITY_ERROR_BANK(error_status);
subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
- I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
- POSTING_READ(reg);
+ intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+ intel_uncore_posting_read(&dev_priv->uncore, reg);
parity_event[0] = I915_L3_PARITY_UEVENT "=1";
parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
@@ -1047,7 +1070,7 @@ static void ivb_parity_work(struct work_struct *work)
kfree(parity_event[1]);
}
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
out:
drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
@@ -1062,17 +1085,12 @@ static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_TC1:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC1);
case HPD_PORT_TC2:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC2);
case HPD_PORT_TC3:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC3);
case HPD_PORT_TC4:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC4);
case HPD_PORT_TC5:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC5);
case HPD_PORT_TC6:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC6);
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
default:
return false;
}
@@ -1096,13 +1114,10 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_A:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_A);
case HPD_PORT_B:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_B);
case HPD_PORT_C:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_C);
case HPD_PORT_D:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_D);
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
default:
return false;
}
@@ -1112,17 +1127,12 @@ static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_TC1:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC1);
case HPD_PORT_TC2:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC2);
case HPD_PORT_TC3:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC3);
case HPD_PORT_TC4:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC4);
case HPD_PORT_TC5:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC5);
case HPD_PORT_TC6:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC6);
+ return val & ICP_TC_HPD_LONG_DETECT(pin);
default:
return false;
}
@@ -1337,7 +1347,7 @@ static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
display_pipe_crc_irq_handler(dev_priv, pipe,
- I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
0, 0, 0, 0);
}
@@ -1345,11 +1355,11 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
display_pipe_crc_irq_handler(dev_priv, pipe,
- I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
@@ -1358,19 +1368,19 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
u32 res1, res2;
if (INTEL_GEN(dev_priv) >= 3)
- res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
+ res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
else
res1 = 0;
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
+ res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
else
res2 = 0;
display_pipe_crc_irq_handler(dev_priv, pipe,
- I915_READ(PIPE_CRC_RES_RED(pipe)),
- I915_READ(PIPE_CRC_RES_GREEN(pipe)),
- I915_READ(PIPE_CRC_RES_BLUE(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
res1, res2);
}
@@ -1379,7 +1389,7 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(PIPESTAT(pipe),
+ intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS);
@@ -1433,7 +1443,7 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
continue;
reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg) & status_mask;
+ pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
/*
@@ -1446,8 +1456,8 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
* an interrupt is still pending.
*/
if (pipe_stats[pipe]) {
- I915_WRITE(reg, pipe_stats[pipe]);
- I915_WRITE(reg, enable_mask);
+ intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
+ intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
}
}
spin_unlock(&dev_priv->irq_lock);
@@ -1530,6 +1540,9 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
intel_handle_vblank(dev_priv, pipe);
+ if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
+ flip_done_handler(dev_priv, pipe);
+
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1563,18 +1576,18 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
* bits can itself generate a new hotplug interrupt :(
*/
for (i = 0; i < 10; i++) {
- u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
+ u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
if (tmp == 0)
return hotplug_status;
hotplug_status |= tmp;
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
}
drm_WARN_ONCE(&dev_priv->drm, 1,
"PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
- I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
return hotplug_status;
}
@@ -1623,9 +1636,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 ier = 0;
- gt_iir = I915_READ(GTIIR);
- pm_iir = I915_READ(GEN6_PMIIR);
- iir = I915_READ(VLV_IIR);
+ gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
+ pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
+ iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
if (gt_iir == 0 && pm_iir == 0 && iir == 0)
break;
@@ -1645,14 +1658,14 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
* don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
* bits this time around.
*/
- I915_WRITE(VLV_MASTER_IER, 0);
- ier = I915_READ(VLV_IER);
- I915_WRITE(VLV_IER, 0);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
+ ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
if (gt_iir)
- I915_WRITE(GTIIR, gt_iir);
+ intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
if (pm_iir)
- I915_WRITE(GEN6_PMIIR, pm_iir);
+ intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -1670,10 +1683,10 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
* from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
*/
if (iir)
- I915_WRITE(VLV_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
- I915_WRITE(VLV_IER, ier);
- I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
if (gt_iir)
gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
@@ -1710,8 +1723,8 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 ier = 0;
- master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
- iir = I915_READ(VLV_IIR);
+ master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
+ iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
if (master_ctl == 0 && iir == 0)
break;
@@ -1731,9 +1744,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
* don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
* bits this time around.
*/
- I915_WRITE(GEN8_MASTER_IRQ, 0);
- ier = I915_READ(VLV_IER);
- I915_WRITE(VLV_IER, 0);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
+ ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
@@ -1754,10 +1767,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
* from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
*/
if (iir)
- I915_WRITE(VLV_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
- I915_WRITE(VLV_IER, ier);
- I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -1783,7 +1796,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
* zero. Not acking leads to "The master control interrupt lied (SDE)!"
* errors.
*/
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
if (!hotplug_trigger) {
u32 mask = PORTA_HOTPLUG_STATUS_MASK |
PORTD_HOTPLUG_STATUS_MASK |
@@ -1792,7 +1805,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
dig_hotplug_reg &= ~mask;
}
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
if (!hotplug_trigger)
return;
@@ -1837,7 +1850,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
- I915_READ(FDI_RX_IIR(pipe)));
+ intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
}
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
@@ -1856,7 +1869,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
- u32 err_int = I915_READ(GEN7_ERR_INT);
+ u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
enum pipe pipe;
if (err_int & ERR_INT_POISON)
@@ -1874,12 +1887,12 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
}
}
- I915_WRITE(GEN7_ERR_INT, err_int);
+ intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
}
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
- u32 serr_int = I915_READ(SERR_INT);
+ u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
enum pipe pipe;
if (serr_int & SERR_INT_POISON)
@@ -1889,7 +1902,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
- I915_WRITE(SERR_INT, serr_int);
+ intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
@@ -1922,7 +1935,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
- I915_READ(FDI_RX_IIR(pipe)));
+ intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
}
if (pch_iir & SDE_ERROR_CPT)
@@ -1938,8 +1951,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (ddi_hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
- I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
ddi_hotplug_trigger, dig_hotplug_reg,
@@ -1950,8 +1963,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (tc_hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
- I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
tc_hotplug_trigger, dig_hotplug_reg,
@@ -1976,8 +1989,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
@@ -1988,8 +2001,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (hotplug2_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
- I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug2_trigger, dig_hotplug_reg,
@@ -2009,8 +2022,8 @@ static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
- I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
+ intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
@@ -2042,6 +2055,9 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
if (de_iir & DE_PIPE_VBLANK(pipe))
intel_handle_vblank(dev_priv, pipe);
+ if (de_iir & DE_PLANE_FLIP_DONE(pipe))
+ flip_done_handler(dev_priv, pipe);
+
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2051,7 +2067,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
- u32 pch_iir = I915_READ(SDEIIR);
+ u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
if (HAS_PCH_CPT(dev_priv))
cpt_irq_handler(dev_priv, pch_iir);
@@ -2059,7 +2075,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
ibx_irq_handler(dev_priv, pch_iir);
/* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
+ intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
}
if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
@@ -2079,10 +2095,10 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
ivb_err_int_handler(dev_priv);
if (de_iir & DE_EDP_PSR_INT_HSW) {
- u32 psr_iir = I915_READ(EDP_PSR_IIR);
+ u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR);
intel_psr_irq_handler(dev_priv, psr_iir);
- I915_WRITE(EDP_PSR_IIR, psr_iir);
+ intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir);
}
if (de_iir & DE_AUX_CHANNEL_A_IVB)
@@ -2092,18 +2108,21 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
intel_opregion_asle_intr(dev_priv);
for_each_pipe(dev_priv, pipe) {
- if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
+ if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
intel_handle_vblank(dev_priv, pipe);
+
+ if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
+ flip_done_handler(dev_priv, pipe);
}
/* check event from PCH */
if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
- u32 pch_iir = I915_READ(SDEIIR);
+ u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
cpt_irq_handler(dev_priv, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
+ intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
}
}
@@ -2190,8 +2209,8 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
@@ -2210,8 +2229,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (trigger_tc) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
- I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tc, dig_hotplug_reg,
@@ -2222,8 +2241,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (trigger_tbt) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
- I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tbt, dig_hotplug_reg,
@@ -2300,8 +2319,8 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
else
iir_reg = EDP_PSR_IIR;
- psr_iir = I915_READ(iir_reg);
- I915_WRITE(iir_reg, psr_iir);
+ psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
+ intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
if (psr_iir)
found = true;
@@ -2325,7 +2344,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
 * In case of dual link, TE comes from DSI_1;
 * this is to check if dual link is enabled
*/
- val = I915_READ(TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
+ val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
val &= PORT_SYNC_MODE_ENABLE;
/*
@@ -2337,7 +2356,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
/* Check if DSI configured in command mode */
- val = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
+ val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
val = val & OP_MODE_MASK;
if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
@@ -2346,7 +2365,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
}
/* Get PIPE for handling VBLANK event */
- val = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
switch (val & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
pipe = PIPE_A;
@@ -2366,8 +2385,16 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
/* clear TE in dsi IIR */
port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
- tmp = I915_READ(DSI_INTR_IDENT_REG(port));
- I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
+ intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
+}
+
+static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 9)
+ return GEN9_PIPE_PLANE1_FLIP_DONE;
+ else
+ return GEN8_PIPE_PRIMARY_FLIP_DONE;
}
static irqreturn_t
@@ -2378,9 +2405,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
enum pipe pipe;
if (master_ctl & GEN8_DE_MISC_IRQ) {
- iir = I915_READ(GEN8_DE_MISC_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
if (iir) {
- I915_WRITE(GEN8_DE_MISC_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
gen8_de_misc_irq_handler(dev_priv, iir);
} else {
@@ -2390,9 +2417,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
- iir = I915_READ(GEN11_DE_HPD_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
if (iir) {
- I915_WRITE(GEN11_DE_HPD_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
ret = IRQ_HANDLED;
gen11_hpd_irq_handler(dev_priv, iir);
} else {
@@ -2402,11 +2429,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (master_ctl & GEN8_DE_PORT_IRQ) {
- iir = I915_READ(GEN8_DE_PORT_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
if (iir) {
bool found = false;
- I915_WRITE(GEN8_DE_PORT_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
ret = IRQ_HANDLED;
if (iir & gen8_de_port_aux_mask(dev_priv)) {
@@ -2459,7 +2486,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
- iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+ iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
drm_err(&dev_priv->drm,
"The master control interrupt lied (DE PIPE)!\n");
@@ -2467,12 +2494,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
ret = IRQ_HANDLED;
- I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
intel_handle_vblank(dev_priv, pipe);
- if (iir & GEN9_PIPE_PLANE1_FLIP_DONE)
+ if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
flip_done_handler(dev_priv, pipe);
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
@@ -2496,9 +2523,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
- iir = I915_READ(SDEIIR);
+ iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
if (iir) {
- I915_WRITE(SDEIIR, iir);
+ intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
ret = IRQ_HANDLED;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
@@ -2741,7 +2768,7 @@ int i915gm_enable_vblank(struct drm_crtc *crtc)
* only when vblank interrupts are actually enabled.
*/
if (dev_priv->vblank_enabled++ == 0)
- I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
return i8xx_enable_vblank(crtc);
}
@@ -2798,16 +2825,16 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
else
port = PORT_A;
- tmp = I915_READ(DSI_INTR_MASK_REG(port));
+ tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
if (enable)
tmp &= ~DSI_TE_EVENT;
else
tmp |= DSI_TE_EVENT;
- I915_WRITE(DSI_INTR_MASK_REG(port), tmp);
+ intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);
- tmp = I915_READ(DSI_INTR_IDENT_REG(port));
- I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
+ intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
return true;
}
@@ -2835,19 +2862,6 @@ int bdw_enable_vblank(struct drm_crtc *crtc)
return 0;
}
-void skl_enable_flip_done(struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&i915->irq_lock, irqflags);
-
- bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
-
- spin_unlock_irqrestore(&i915->irq_lock, irqflags);
-}
-
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@@ -2869,7 +2883,7 @@ void i915gm_disable_vblank(struct drm_crtc *crtc)
i8xx_disable_vblank(crtc);
if (--dev_priv->vblank_enabled == 0)
- I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
void i965_disable_vblank(struct drm_crtc *crtc)
@@ -2912,19 +2926,6 @@ void bdw_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void skl_disable_flip_done(struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&i915->irq_lock, irqflags);
-
- bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
-
- spin_unlock_irqrestore(&i915->irq_lock, irqflags);
-}
-
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -2935,7 +2936,7 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv)
GEN3_IRQ_RESET(uncore, SDE);
if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
- I915_WRITE(SERR_INT, 0xffffffff);
+ intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -2948,7 +2949,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
i9xx_pipestat_irq_reset(dev_priv);
@@ -3011,8 +3012,8 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
- I915_WRITE(VLV_MASTER_IER, 0);
- POSTING_READ(VLV_MASTER_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
+ intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
gen5_gt_irq_reset(&dev_priv->gt);
@@ -3117,13 +3118,10 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
struct intel_uncore *uncore = &dev_priv->uncore;
-
- u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+ u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
+ gen8_de_pipe_flip_done_mask(dev_priv);
enum pipe pipe;
- if (INTEL_GEN(dev_priv) >= 9)
- extra_ier |= GEN9_PIPE_PLANE1_FLIP_DONE;
-
spin_lock_irq(&dev_priv->irq_lock);
if (!intel_irqs_enabled(dev_priv)) {
@@ -3165,8 +3163,8 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- I915_WRITE(GEN8_MASTER_IRQ, 0);
- POSTING_READ(GEN8_MASTER_IRQ);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
gen8_gt_irq_reset(&dev_priv->gt);
@@ -3212,7 +3210,7 @@ static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
* duration to 2ms (which is the minimum in the Display Port spec).
* The pulse duration bits are reserved on LPT+.
*/
- hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
hotplug &= ~(PORTA_HOTPLUG_ENABLE |
PORTB_HOTPLUG_ENABLE |
PORTC_HOTPLUG_ENABLE |
@@ -3221,7 +3219,7 @@ static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
PORTC_PULSE_DURATION_MASK |
PORTD_PULSE_DURATION_MASK);
hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
- I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
}
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3270,20 +3268,20 @@ static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(SHOTPLUG_CTL_DDI);
+ hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
- I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug);
}
static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(SHOTPLUG_CTL_TC);
+ hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
@@ -3291,7 +3289,7 @@ static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
- I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug);
}
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3302,7 +3300,7 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
- I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3330,12 +3328,12 @@ static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(SOUTH_CHICKEN1);
+ val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
val |= (INVERT_DDIA_HPD |
INVERT_DDIB_HPD |
INVERT_DDIC_HPD |
INVERT_DDID_HPD);
- I915_WRITE(SOUTH_CHICKEN1, val);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
icp_hpd_irq_setup(dev_priv);
}
@@ -3344,7 +3342,7 @@ static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
+ hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
@@ -3352,14 +3350,14 @@ static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
- I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug);
}
static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+ hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
@@ -3367,7 +3365,7 @@ static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
- I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug);
}
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3378,11 +3376,11 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
- val = I915_READ(GEN11_DE_HPD_IMR);
+ val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
val &= ~hotplug_irqs;
val |= ~enabled_irqs & hotplug_irqs;
- I915_WRITE(GEN11_DE_HPD_IMR, val);
- POSTING_READ(GEN11_DE_HPD_IMR);
+ intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
gen11_tc_hpd_detection_setup(dev_priv);
gen11_tbt_hpd_detection_setup(dev_priv);
@@ -3425,25 +3423,25 @@ static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
/* Display WA #1179 WaHardHangonHotPlug: cnp */
if (HAS_PCH_CNP(dev_priv)) {
- val = I915_READ(SOUTH_CHICKEN1);
+ val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
val |= CHASSIS_CLK_REQ_DURATION(0xf);
- I915_WRITE(SOUTH_CHICKEN1, val);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
}
/* Enable digital hotplug on the PCH */
- hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
hotplug &= ~(PORTA_HOTPLUG_ENABLE |
PORTB_HOTPLUG_ENABLE |
PORTC_HOTPLUG_ENABLE |
PORTD_HOTPLUG_ENABLE);
hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
- I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
- hotplug = I915_READ(PCH_PORT_HOTPLUG2);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
hotplug &= ~PORTE_HOTPLUG_ENABLE;
hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
- I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
}
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3451,7 +3449,7 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
u32 hotplug_irqs, enabled_irqs;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
@@ -3482,11 +3480,11 @@ static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
* duration to 2ms (which is the minimum in the Display Port spec)
* The pulse duration bits are reserved on HSW+.
*/
- hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
+ hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
DIGITAL_PORTA_PULSE_DURATION_MASK);
hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
- I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
+ intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3536,7 +3534,7 @@ static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
hotplug &= ~(PORTA_HOTPLUG_ENABLE |
PORTB_HOTPLUG_ENABLE |
PORTC_HOTPLUG_ENABLE |
@@ -3544,7 +3542,7 @@ static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
BXT_DDIB_HPD_INVERT |
BXT_DDIC_HPD_INVERT);
hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
- I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
}
static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3598,6 +3596,9 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
DE_DP_A_HOTPLUG_IVB);
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
@@ -3605,6 +3606,8 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
DE_PIPEA_CRC_DONE | DE_POISON);
extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+ DE_PLANE_FLIP_DONE(PLANE_A) |
+ DE_PLANE_FLIP_DONE(PLANE_B) |
DE_DP_A_HOTPLUG);
}
@@ -3664,8 +3667,8 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
- POSTING_READ(VLV_MASTER_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+ intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -3695,11 +3698,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
de_port_masked |= DSI0_TE | DSI1_TE;
}
- de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
- GEN8_PIPE_FIFO_UNDERRUN;
-
- if (INTEL_GEN(dev_priv) >= 9)
- de_pipe_enables |= GEN9_PIPE_PLANE1_FLIP_DONE;
+ de_pipe_enables = de_pipe_masked |
+ GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
+ gen8_de_pipe_flip_done_mask(dev_priv);
de_port_enables = de_port_masked;
if (IS_GEN9_LP(dev_priv))
@@ -3778,14 +3779,14 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
- I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
dg1_master_intr_enable(uncore->regs);
- POSTING_READ(DG1_MSTR_UNIT_INTR);
+ intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR);
} else {
gen11_master_intr_enable(uncore->regs);
- POSTING_READ(GEN11_GFX_MSTR_IRQ);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}
}
@@ -3798,8 +3799,8 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- POSTING_READ(GEN8_MASTER_IRQ);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}
static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
@@ -3889,11 +3890,11 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
{
u32 emr;
- *eir = I915_READ(EIR);
+ *eir = intel_uncore_read(&dev_priv->uncore, EIR);
- I915_WRITE(EIR, *eir);
+ intel_uncore_write(&dev_priv->uncore, EIR, *eir);
- *eir_stuck = I915_READ(EIR);
+ *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
if (*eir_stuck == 0)
return;
@@ -3907,9 +3908,9 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
* (or by a GPU reset) so we mask any bit that
* remains set.
*/
- emr = I915_READ(EMR);
- I915_WRITE(EMR, 0xffffffff);
- I915_WRITE(EMR, emr | *eir_stuck);
+ emr = intel_uncore_read(&dev_priv->uncore, EMR);
+ intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
+ intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}
static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
@@ -3975,7 +3976,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
}
i9xx_pipestat_irq_reset(dev_priv);
@@ -3989,7 +3990,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
- I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
+ intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
I915_ERROR_MEMORY_REFRESH));
/* Unmask the interrupts that we always want on. */
@@ -4042,7 +4043,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(GEN2_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
if (iir == 0)
break;
@@ -4059,7 +4060,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(GEN2_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
@@ -4085,7 +4086,7 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
i9xx_pipestat_irq_reset(dev_priv);
@@ -4112,7 +4113,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
error_mask = ~(I915_ERROR_PAGE_TABLE |
I915_ERROR_MEMORY_REFRESH);
}
- I915_WRITE(EMR, error_mask);
+ intel_uncore_write(&dev_priv->uncore, EMR, error_mask);
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
@@ -4188,7 +4189,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(GEN2_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
if (iir == 0)
break;
@@ -4204,7 +4205,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(GEN2_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 2efe609519ca..25f25cd95818 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -118,9 +118,6 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
-void skl_enable_flip_done(struct intel_crtc *crtc);
-void skl_disable_flip_done(struct intel_crtc *crtc);
-
void gen2_irq_reset(struct intel_uncore *uncore);
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
i915_reg_t iir, i915_reg_t ier);
diff --git a/drivers/gpu/drm/i915/i915_mitigations.c b/drivers/gpu/drm/i915/i915_mitigations.c
new file mode 100644
index 000000000000..84f12598d145
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mitigations.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "i915_drv.h"
+#include "i915_mitigations.h"
+
+static unsigned long mitigations __read_mostly = ~0UL;
+
+enum {
+ CLEAR_RESIDUALS = 0,
+};
+
+static const char * const names[] = {
+ [CLEAR_RESIDUALS] = "residuals",
+};
+
+bool i915_mitigate_clear_residuals(void)
+{
+ return READ_ONCE(mitigations) & BIT(CLEAR_RESIDUALS);
+}
+
+static int mitigations_set(const char *val, const struct kernel_param *kp)
+{
+ unsigned long new = ~0UL;
+ char *str, *sep, *tok;
+ bool first = true;
+ int err = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(names) >= BITS_PER_TYPE(mitigations));
+
+ str = kstrdup(val, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+
+ for (sep = str; (tok = strsep(&sep, ","));) {
+ bool enable = true;
+ int i;
+
+ /* Be tolerant of leading/trailing whitespace */
+ tok = strim(tok);
+
+ if (first) {
+ first = false;
+
+ if (!strcmp(tok, "auto"))
+ continue;
+
+ new = 0;
+ if (!strcmp(tok, "off"))
+ continue;
+ }
+
+ if (*tok == '!') {
+ enable = !enable;
+ tok++;
+ }
+
+ if (!strncmp(tok, "no", 2)) {
+ enable = !enable;
+ tok += 2;
+ }
+
+ if (*tok == '\0')
+ continue;
+
+ for (i = 0; i < ARRAY_SIZE(names); i++) {
+ if (!strcmp(tok, names[i])) {
+ if (enable)
+ new |= BIT(i);
+ else
+ new &= ~BIT(i);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(names)) {
+ pr_err("Bad \"%s.mitigations=%s\", '%s' is unknown\n",
+ DRIVER_NAME, val, tok);
+ err = -EINVAL;
+ break;
+ }
+ }
+ kfree(str);
+ if (err)
+ return err;
+
+ WRITE_ONCE(mitigations, new);
+ return 0;
+}
+
+static int mitigations_get(char *buffer, const struct kernel_param *kp)
+{
+ unsigned long local = READ_ONCE(mitigations);
+ int count, i;
+ bool enable;
+
+ if (!local)
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", "off");
+
+ if (local & BIT(BITS_PER_LONG - 1)) {
+ count = scnprintf(buffer, PAGE_SIZE, "%s,", "auto");
+ enable = false;
+ } else {
+ enable = true;
+ count = 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(names); i++) {
+ if ((local & BIT(i)) != enable)
+ continue;
+
+ count += scnprintf(buffer + count, PAGE_SIZE - count,
+ "%s%s,", enable ? "" : "!", names[i]);
+ }
+
+ buffer[count - 1] = '\n';
+ return count;
+}
+
+static const struct kernel_param_ops ops = {
+ .set = mitigations_set,
+ .get = mitigations_get,
+};
+
+module_param_cb_unsafe(mitigations, &ops, NULL, 0600);
+MODULE_PARM_DESC(mitigations,
+"Selectively enable security mitigations for all Intel® GPUs in the system.\n"
+"\n"
+" auto -- enables all mitigations required for the platform [default]\n"
+" off -- disables all mitigations\n"
+"\n"
+"Individual mitigations can be enabled by passing a comma-separated string,\n"
+"e.g. mitigations=residuals to enable only clearing residuals or\n"
+"mitigations=auto,noresiduals to disable only the clear residual mitigation.\n"
+"Either '!' or 'no' may be used to switch from enabling the mitigation to\n"
+"disabling it.\n"
+"\n"
+"Active mitigations for Ivybridge, Baytrail, Haswell:\n"
+" residuals -- clear all thread-local registers between contexts"
+);
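The parameter parser above folds "auto", "off", bare names and "no"/"!"-prefixed names into a single bitmask. A rough user-space sketch of the same token handling, assuming one "residuals" bit (the constants, helper name and sample input below are illustrative stand-ins, not the kernel's interfaces):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define BIT(n) (1UL << (n))

	static const char * const names[] = { "residuals" };	/* CLEAR_RESIDUALS == bit 0 */

	/* Parse "auto", "off" or a comma-separated list into a mask; ~0UL means "all". */
	static int parse_mitigations(const char *val, unsigned long *out)
	{
		unsigned long new_mask = ~0UL;
		char *str, *sep, *tok;
		int first = 1;

		str = strdup(val);
		if (!str)
			return -1;

		for (sep = str; (tok = strsep(&sep, ","));) {
			int enable = 1;
			size_t i;

			if (first) {
				first = 0;
				if (!strcmp(tok, "auto"))
					continue;
				new_mask = 0;
				if (!strcmp(tok, "off"))
					continue;
			}

			if (*tok == '!') {		/* "!name" disables */
				enable = !enable;
				tok++;
			}
			if (!strncmp(tok, "no", 2)) {	/* "noname" disables too */
				enable = !enable;
				tok += 2;
			}
			if (*tok == '\0')
				continue;

			for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
				if (!strcmp(tok, names[i])) {
					if (enable)
						new_mask |= BIT(i);
					else
						new_mask &= ~BIT(i);
					break;
				}
			}
			if (i == sizeof(names) / sizeof(names[0])) {
				free(str);
				return -1;	/* unknown token */
			}
		}

		free(str);
		*out = new_mask;
		return 0;
	}

	int main(void)
	{
		unsigned long mask;

		if (parse_mitigations("auto,noresiduals", &mask) == 0)
			printf("mask=%#lx, residuals %s\n", mask,
			       (mask & BIT(0)) ? "enabled" : "disabled");
		return 0;
	}

With "auto,noresiduals" the leading token keeps the ~0UL default and the second token then clears bit 0, matching what the kernel reports back through i915_mitigate_clear_residuals().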
diff --git a/drivers/gpu/drm/i915/i915_mitigations.h b/drivers/gpu/drm/i915/i915_mitigations.h
new file mode 100644
index 000000000000..1359d8135287
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mitigations.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_MITIGATIONS_H__
+#define __I915_MITIGATIONS_H__
+
+#include <linux/types.h>
+
+bool i915_mitigate_clear_residuals(void);
+
+#endif /* __I915_MITIGATIONS_H__ */
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 43039dc8c607..666808cb3a32 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -62,7 +62,7 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
struct remap_pfn *r = data;
- if (GEM_WARN_ON(!r->sgt.pfn))
+ if (GEM_WARN_ON(!r->sgt.sgp))
return -EINVAL;
/* Special PTE are not associated with any struct page */
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 7f139ea4a90b..6939634e56ed 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -185,7 +185,7 @@ i915_param_named_unsafe(inject_probe_failure, uint, 0400,
i915_param_named(enable_dpcd_backlight, int, 0400,
"Enable support for DPCD backlight control"
- "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enabled)");
+ "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enable, 2=force VESA interface, 3=force Intel interface)");
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 330c03e2b4f7..f031966af5b7 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -32,6 +32,7 @@ struct drm_printer;
#define ENABLE_GUC_SUBMISSION BIT(0)
#define ENABLE_GUC_LOAD_HUC BIT(1)
+#define ENABLE_GUC_MASK GENMASK(1, 0)
/*
* Invoke param, a function-like macro, for each i915 param, with arguments:
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 11fe790b1969..020b5f561f07 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -455,6 +455,7 @@ static const struct intel_device_info snb_m_gt2_info = {
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
+ .has_reset_engine = true, \
.has_rps = true, \
.dma_mask_size = 40, \
.ppgtt_type = INTEL_PPGTT_ALIASING, \
@@ -513,6 +514,7 @@ static const struct intel_device_info vlv_info = {
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
+ .has_reset_engine = true,
.has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
@@ -571,8 +573,7 @@ static const struct intel_device_info hsw_gt3_info = {
.dma_mask_size = 39, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
- .has_64bit_reloc = 1, \
- .has_reset_engine = 1
+ .has_64bit_reloc = 1
#define BDW_PLATFORM \
GEN8_FEATURES, \
@@ -639,7 +640,6 @@ static const struct intel_device_info chv_info = {
GEN8_FEATURES, \
GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
- .has_logical_ring_preemption = 1, \
.display.has_csr = 1, \
.has_gt_uc = 1, \
.display.has_hdcp = 1, \
@@ -700,7 +700,6 @@ static const struct intel_device_info skl_gt4_info = {
.has_rps = true, \
.display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
- .has_logical_ring_preemption = 1, \
.has_gt_uc = 1, \
.dma_mask_size = 39, \
.ppgtt_type = INTEL_PPGTT_FULL, \
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 649c26518d26..112ba5f2ce90 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -198,8 +198,11 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
+#include "gt/intel_execlists_submission.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
-#include "gt/intel_lrc_reg.h"
+#include "gt/intel_gt_clock_utils.h"
+#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
@@ -1635,7 +1638,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
const u64 delay_ticks = 0xffffffffffffffff -
- i915_cs_timestamp_ns_to_ticks(i915, atomic64_read(&stream->perf->noa_programming_delay));
+ intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt,
+ atomic64_read(&stream->perf->noa_programming_delay));
const u32 base = stream->engine->mmio_base;
#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
u32 *batch, *ts0, *cs, *jump;
@@ -3516,7 +3520,8 @@ err:
static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
- return i915_cs_timestamp_ticks_to_ns(perf->i915, 2ULL << exponent);
+ return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt,
+ 2ULL << exponent);
}
/**
@@ -4370,11 +4375,11 @@ void i915_perf_init(struct drm_i915_private *i915)
if (perf->ops.enable_metric_set) {
mutex_init(&perf->lock);
- oa_sample_rate_hard_limit =
- RUNTIME_INFO(i915)->cs_timestamp_frequency_hz / 2;
+ /* Choose a representative limit */
+ oa_sample_rate_hard_limit = i915->gt.clock_frequency / 2;
mutex_init(&perf->metrics_lock);
- idr_init(&perf->metrics_idr);
+ idr_init_base(&perf->metrics_idr, 1);
/* We set up some ratelimit state to potentially throttle any
* _NOTES about spurious, invalid OA reports which we don't
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d76685ce0399..2b88c0baa1bf 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -26,8 +26,6 @@
BIT(I915_SAMPLE_WAIT) | \
BIT(I915_SAMPLE_SEMA))
-#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)
-
static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;
@@ -56,17 +54,42 @@ static bool is_engine_config(u64 config)
return config < __I915_PMU_OTHER(0);
}
-static unsigned int config_enabled_bit(u64 config)
+static unsigned int other_bit(const u64 config)
+{
+ unsigned int val;
+
+ switch (config) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
+ break;
+ case I915_PMU_REQUESTED_FREQUENCY:
+ val = __I915_PMU_REQUESTED_FREQUENCY_ENABLED;
+ break;
+ case I915_PMU_RC6_RESIDENCY:
+ val = __I915_PMU_RC6_RESIDENCY_ENABLED;
+ break;
+ default:
+ /*
+ * Events that do not require sampling, or tracking state
+ * transitions between enabled and disabled can be ignored.
+ */
+ return -1;
+ }
+
+ return I915_ENGINE_SAMPLE_COUNT + val;
+}
+
+static unsigned int config_bit(const u64 config)
{
if (is_engine_config(config))
return engine_config_sample(config);
else
- return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
+ return other_bit(config);
}
-static u64 config_enabled_mask(u64 config)
+static u64 config_mask(u64 config)
{
- return BIT_ULL(config_enabled_bit(config));
+ return BIT_ULL(config_bit(config));
}
static bool is_engine_event(struct perf_event *event)
@@ -74,15 +97,15 @@ static bool is_engine_event(struct perf_event *event)
return is_engine_config(event->attr.config);
}
-static unsigned int event_enabled_bit(struct perf_event *event)
+static unsigned int event_bit(struct perf_event *event)
{
- return config_enabled_bit(event->attr.config);
+ return config_bit(event->attr.config);
}
static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
- u64 enable;
+ u32 enable;
/*
* Only some counters need the sampling timer.
@@ -95,8 +118,8 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
* Mask out all the ones which do not need the timer, or in
* other words keep all the ones that could need the timer.
*/
- enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
- config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
+ enable &= config_mask(I915_PMU_ACTUAL_FREQUENCY) |
+ config_mask(I915_PMU_REQUESTED_FREQUENCY) |
ENGINE_SAMPLE_MASK;
/*
@@ -137,11 +160,9 @@ static u64 __get_rc6(struct intel_gt *gt)
return val;
}
-#if IS_ENABLED(CONFIG_PM)
-
-static inline s64 ktime_since(const ktime_t kt)
+static inline s64 ktime_since_raw(const ktime_t kt)
{
- return ktime_to_ns(ktime_sub(ktime_get(), kt));
+ return ktime_to_ns(ktime_sub(ktime_get_raw(), kt));
}
static u64 get_rc6(struct intel_gt *gt)
@@ -170,7 +191,7 @@ static u64 get_rc6(struct intel_gt *gt)
* on top of the last known real value, as the approximated RC6
* counter value.
*/
- val = ktime_since(pmu->sleep_last);
+ val = ktime_since_raw(pmu->sleep_last);
val += pmu->sample[__I915_SAMPLE_RC6].cur;
}
@@ -184,26 +205,26 @@ static u64 get_rc6(struct intel_gt *gt)
return val;
}
-static void park_rc6(struct drm_i915_private *i915)
+static void init_rc6(struct i915_pmu *pmu)
{
- struct i915_pmu *pmu = &i915->pmu;
+ struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ intel_wakeref_t wakeref;
- if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
+ with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
-
- pmu->sleep_last = ktime_get();
+ pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
+ pmu->sample[__I915_SAMPLE_RC6].cur;
+ pmu->sleep_last = ktime_get_raw();
+ }
}
-#else
-
-static u64 get_rc6(struct intel_gt *gt)
+static void park_rc6(struct drm_i915_private *i915)
{
- return __get_rc6(gt);
-}
-
-static void park_rc6(struct drm_i915_private *i915) {}
+ struct i915_pmu *pmu = &i915->pmu;
-#endif
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+ pmu->sleep_last = ktime_get_raw();
+}
static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
@@ -343,8 +364,8 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
static bool frequency_sampling_enabled(struct i915_pmu *pmu)
{
return pmu->enable &
- (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
- config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY));
+ (config_mask(I915_PMU_ACTUAL_FREQUENCY) |
+ config_mask(I915_PMU_REQUESTED_FREQUENCY));
}
static void
@@ -362,7 +383,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
if (!intel_gt_pm_get_if_awake(gt))
return;
- if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+ if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) {
u32 val;
/*
@@ -384,7 +405,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
intel_gpu_freq(rps, val), period_ns / 1000);
}
- if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
+ if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
intel_gpu_freq(rps, rps->cur_freq),
period_ns / 1000);
@@ -471,6 +492,8 @@ config_status(struct drm_i915_private *i915, u64 config)
if (!HAS_RC6(i915))
return -ENODEV;
break;
+ case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
+ break;
default:
return -ENOENT;
}
@@ -578,6 +601,9 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
case I915_PMU_RC6_RESIDENCY:
val = get_rc6(&i915->gt);
break;
+ case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
+ val = ktime_to_ns(intel_gt_get_awake_time(&i915->gt));
+ break;
}
}
@@ -610,12 +636,14 @@ static void i915_pmu_enable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
- unsigned int bit = event_enabled_bit(event);
struct i915_pmu *pmu = &i915->pmu;
- intel_wakeref_t wakeref;
unsigned long flags;
+ unsigned int bit;
+
+ bit = event_bit(event);
+ if (bit == -1)
+ goto update;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
spin_lock_irqsave(&pmu->lock, flags);
/*
@@ -626,13 +654,6 @@ static void i915_pmu_enable(struct perf_event *event)
GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
GEM_BUG_ON(pmu->enable_count[bit] == ~0);
- if (pmu->enable_count[bit] == 0 &&
- config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
- pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
- pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
- pmu->sleep_last = ktime_get();
- }
-
pmu->enable |= BIT_ULL(bit);
pmu->enable_count[bit]++;
@@ -667,24 +688,26 @@ static void i915_pmu_enable(struct perf_event *event)
spin_unlock_irqrestore(&pmu->lock, flags);
+update:
/*
* Store the current counter value so we can report the correct delta
* for all listeners. Even when the event was already enabled and has
* an existing non-zero value.
*/
local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static void i915_pmu_disable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
- unsigned int bit = event_enabled_bit(event);
+ unsigned int bit = event_bit(event);
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
+ if (bit == -1)
+ return;
+
spin_lock_irqsave(&pmu->lock, flags);
if (is_engine_event(event)) {
@@ -881,6 +904,7 @@ create_event_attributes(struct i915_pmu *pmu)
__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
+ __event(I915_PMU_SOFTWARE_GT_AWAKE_TIME, "software-gt-awake-time", "ns"),
};
static const struct {
enum drm_i915_pmu_engine_sample sample;
@@ -1130,6 +1154,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
pmu->cpuhp.cpu = -1;
+ init_rc6(pmu);
if (!is_igp(i915)) {
pmu->name = kasprintf(GFP_KERNEL,
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 8405d6da5b9a..60f9595f902c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -14,6 +14,21 @@
struct drm_i915_private;
+/**
+ * Non-engine events that we need to track enabled-disabled transition and
+ * current state.
+ */
+enum i915_pmu_tracked_events {
+ __I915_PMU_ACTUAL_FREQUENCY_ENABLED = 0,
+ __I915_PMU_REQUESTED_FREQUENCY_ENABLED,
+ __I915_PMU_RC6_RESIDENCY_ENABLED,
+ __I915_PMU_TRACKED_EVENT_COUNT, /* count marker */
+};
+
+/**
+ * Slots used from the sampling timer (non-engine events) with some extras for
+ * convenience.
+ */
enum {
__I915_SAMPLE_FREQ_ACT = 0,
__I915_SAMPLE_FREQ_REQ,
@@ -28,8 +43,7 @@ enum {
 * It is also used to know the needed number of event reference counters.
*/
#define I915_PMU_MASK_BITS \
- ((1 << I915_PMU_SAMPLE_BITS) + \
- (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
+ (I915_ENGINE_SAMPLE_COUNT + __I915_PMU_TRACKED_EVENT_COUNT)
#define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1)
@@ -66,18 +80,17 @@ struct i915_pmu {
*/
struct hrtimer timer;
/**
- * @enable: Bitmask of all currently enabled events.
+ * @enable: Bitmask of specific enabled events.
+ *
+	 * For some events we need to track their state and do some internal
+	 * housekeeping.
*
- * Bits are derived from uAPI event numbers in a way that low 16 bits
- * correspond to engine event _sample_ _type_ (I915_SAMPLE_QUEUED is
- * bit 0), and higher bits correspond to other events (for instance
- * I915_PMU_ACTUAL_FREQUENCY is bit 16 etc).
+ * Each engine event sampler type and event listed in enum
+ * i915_pmu_tracked_events gets a bit in this field.
*
- * In other words, low 16 bits are not per engine but per engine
- * sampler type, while the upper bits are directly mapped to other
- * event types.
+ * Low bits are engine samplers and other events continue from there.
*/
- u64 enable;
+ u32 enable;
/**
* @timer_last:
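The rewritten comment above describes the packing: the engine sampler types occupy the low bits and the tracked non-engine events are stacked on top of them. A minimal sketch of that mapping (the enum values are illustrative placeholders, not the drm_i915 uAPI constants):

	#include <stdio.h>

	/* Engine sampler slots come first ... */
	enum { SAMPLE_BUSY, SAMPLE_WAIT, SAMPLE_SEMA, ENGINE_SAMPLE_COUNT };
	/* ... and tracked non-engine events follow directly after them. */
	enum { ACTUAL_FREQ, REQUESTED_FREQ, RC6_RESIDENCY, TRACKED_EVENT_COUNT };

	static unsigned int other_bit(unsigned int tracked_event)
	{
		return ENGINE_SAMPLE_COUNT + tracked_event;
	}

	int main(void)
	{
		printf("rc6-residency -> bit %u of a %u-bit enable mask\n",
		       other_bit(RC6_RESIDENCY),
		       ENGINE_SAMPLE_COUNT + TRACKED_EVENT_COUNT);
		return 0;
	}

This is why the enable field can shrink from u64 to u32: only ENGINE_SAMPLE_COUNT + __I915_PMU_TRACKED_EVENT_COUNT bits are needed rather than one bit per uAPI config value.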
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5375b219cc3b..598abd2f5b5f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2928,8 +2928,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define HDPORT_STATE _MMIO(0x45050)
#define HDPORT_DPLL_USED_MASK REG_GENMASK(14, 12)
-#define HDPORT_PHY_USED_DP(phy) REG_BIT(2 * (phy) + 2)
-#define HDPORT_PHY_USED_HDMI(phy) REG_BIT(2 * (phy) + 1)
+#define HDPORT_DDI_USED(phy) REG_BIT(2 * (phy) + 1)
#define HDPORT_ENABLED REG_BIT(0)
/* Make render/texture TLB fetches lower priority than associated data
@@ -4347,12 +4346,13 @@ enum {
#define _TRANS_VRR_CTL_B 0x61420
#define _TRANS_VRR_CTL_C 0x62420
#define _TRANS_VRR_CTL_D 0x63420
-#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
-#define VRR_CTL_VRR_ENABLE REG_BIT(31)
-#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
-#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
-#define VRR_CTL_LINE_COUNT_MASK REG_GENMASK(10, 3)
-#define VRR_CTL_SW_FULLLINE_COUNT REG_BIT(0)
+#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
+#define VRR_CTL_VRR_ENABLE REG_BIT(31)
+#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
+#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
+#define VRR_CTL_PIPELINE_FULL_MASK REG_GENMASK(10, 3)
+#define VRR_CTL_PIPELINE_FULL(x) REG_FIELD_PREP(VRR_CTL_PIPELINE_FULL_MASK, (x))
+#define VRR_CTL_PIPELINE_FULL_OVERRIDE REG_BIT(0)
#define _TRANS_VRR_VMAX_A 0x60424
#define _TRANS_VRR_VMAX_B 0x61424
@@ -6578,6 +6578,7 @@ enum {
#define TGL_CURSOR_D_OFFSET 0x73080
/* Display A control */
+#define _DSPAADDR_VLV 0x7017C /* vlv/chv */
#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1 << 31)
#define DISPLAY_PLANE_DISABLE 0
@@ -6614,6 +6615,7 @@ enum {
#define DISPPLANE_ROTATE_180 (1 << 15)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1 << 14) /* Ironlake */
#define DISPPLANE_TILED (1 << 10)
+#define DISPPLANE_ASYNC_FLIP (1 << 9) /* g4x+ */
#define DISPPLANE_MIRROR (1 << 8) /* CHV pipe B */
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
@@ -6625,6 +6627,7 @@ enum {
#define _DSPASURFLIVE 0x701AC
#define _DSPAGAMC 0x701E0
+#define DSPADDR_VLV(plane) _MMIO_PIPE2(plane, _DSPAADDR_VLV)
#define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR)
#define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR)
#define DSPSTRIDE(plane) _MMIO_PIPE2(plane, _DSPASTRIDE)
@@ -7070,6 +7073,8 @@ enum {
#define _PLANE_KEYMAX_1_A 0x701a0
#define _PLANE_KEYMAX_2_A 0x702a0
#define PLANE_KEYMAX_ALPHA(a) ((a) << 24)
+#define _PLANE_CC_VAL_1_A 0x701b4
+#define _PLANE_CC_VAL_2_A 0x702b4
#define _PLANE_AUX_DIST_1_A 0x701c0
#define _PLANE_AUX_DIST_2_A 0x702c0
#define _PLANE_AUX_OFFSET_1_A 0x701c4
@@ -7111,6 +7116,13 @@ enum {
#define _PLANE_NV12_BUF_CFG_1_A 0x70278
#define _PLANE_NV12_BUF_CFG_2_A 0x70378
+#define _PLANE_CC_VAL_1_B 0x711b4
+#define _PLANE_CC_VAL_2_B 0x712b4
+#define _PLANE_CC_VAL_1(pipe) _PIPE(pipe, _PLANE_CC_VAL_1_A, _PLANE_CC_VAL_1_B)
+#define _PLANE_CC_VAL_2(pipe) _PIPE(pipe, _PLANE_CC_VAL_2_A, _PLANE_CC_VAL_2_B)
+#define PLANE_CC_VAL(pipe, plane) \
+ _MMIO_PLANE(plane, _PLANE_CC_VAL_1(pipe), _PLANE_CC_VAL_2(pipe))
+
/* Input CSC Register Definitions */
#define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0
#define _PLANE_INPUT_CSC_RY_GY_2_A 0x702E0
@@ -9872,6 +9884,7 @@ enum skl_power_gate {
_PORTD_HDCP2_BASE, \
_PORTE_HDCP2_BASE, \
_PORTF_HDCP2_BASE) + (x))
+
#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98)
#define _TRANSA_HDCP2_AUTH 0x66498
#define _TRANSB_HDCP2_AUTH 0x66598
@@ -9911,6 +9924,44 @@ enum skl_power_gate {
TRANS_HDCP2_STATUS(trans) : \
PORT_HDCP2_STATUS(port))
+#define _PIPEA_HDCP2_STREAM_STATUS 0x668C0
+#define _PIPEB_HDCP2_STREAM_STATUS 0x665C0
+#define _PIPEC_HDCP2_STREAM_STATUS 0x666C0
+#define _PIPED_HDCP2_STREAM_STATUS 0x667C0
+#define PIPE_HDCP2_STREAM_STATUS(pipe) _MMIO(_PICK((pipe), \
+ _PIPEA_HDCP2_STREAM_STATUS, \
+ _PIPEB_HDCP2_STREAM_STATUS, \
+ _PIPEC_HDCP2_STREAM_STATUS, \
+ _PIPED_HDCP2_STREAM_STATUS))
+
+#define _TRANSA_HDCP2_STREAM_STATUS 0x664C0
+#define _TRANSB_HDCP2_STREAM_STATUS 0x665C0
+#define TRANS_HDCP2_STREAM_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STREAM_STATUS, \
+ _TRANSB_HDCP2_STREAM_STATUS)
+#define STREAM_ENCRYPTION_STATUS BIT(31)
+#define STREAM_TYPE_STATUS BIT(30)
+#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STREAM_STATUS(trans) : \
+ PIPE_HDCP2_STREAM_STATUS(pipe))
+
+#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
+#define _PORTB_HDCP2_AUTH_STREAM 0x66F04
+#define PORT_HDCP2_AUTH_STREAM(port) _MMIO_PORT(port, \
+ _PORTA_HDCP2_AUTH_STREAM, \
+ _PORTB_HDCP2_AUTH_STREAM)
+#define _TRANSA_HDCP2_AUTH_STREAM 0x66F00
+#define _TRANSB_HDCP2_AUTH_STREAM 0x66F04
+#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_AUTH_STREAM, \
+ _TRANSB_HDCP2_AUTH_STREAM)
+#define AUTH_STREAM_TYPE BIT(31)
+#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH_STREAM(trans) : \
+ PORT_HDCP2_AUTH_STREAM(port))
+
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
@@ -9960,6 +10011,7 @@ enum skl_power_gate {
#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
+#define TRANS_DDI_HDCP_SELECT REG_BIT(5)
#define TRANS_DDI_BFI_ENABLE (1 << 4)
#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0)
@@ -10851,8 +10903,10 @@ enum skl_power_gate {
#define CNL_DRAM_RANK_3 (0x2 << 9)
#define CNL_DRAM_RANK_4 (0x3 << 9)
-/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
- * since on HSW we can't write to it using I915_WRITE. */
+/*
+ * Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
+ * since on HSW we can't write to it using intel_uncore_write.
+ */
#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define D_COMP_BDW _MMIO(0x138144)
#define D_COMP_RCOMP_IN_PROGRESS (1 << 9)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5385b081a376..22e39d938f17 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -33,6 +33,7 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "gt/intel_rps.h"
@@ -275,7 +276,7 @@ static void remove_from_engine(struct i915_request *rq)
bool i915_request_retire(struct i915_request *rq)
{
- if (!i915_request_completed(rq))
+ if (!__i915_request_is_complete(rq))
return false;
RQ_TRACE(rq, "\n");
@@ -306,10 +307,8 @@ bool i915_request_retire(struct i915_request *rq)
spin_unlock_irq(&rq->lock);
}
- if (i915_request_has_waitboost(rq)) {
- GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
+ if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
atomic_dec(&rq->engine->gt->rps.num_waiters);
- }
/*
* We only loosely track inflight requests across preemption,
@@ -321,7 +320,8 @@ bool i915_request_retire(struct i915_request *rq)
* after removing the breadcrumb and signaling it, so that we do not
* inadvertently attach the breadcrumb to a completed request.
*/
- remove_from_engine(rq);
+ if (!list_empty(&rq->sched.link))
+ remove_from_engine(rq);
GEM_BUG_ON(!llist_empty(&rq->execute_cb));
__list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
@@ -342,8 +342,7 @@ void i915_request_retire_upto(struct i915_request *rq)
struct i915_request *tmp;
RQ_TRACE(rq, "\n");
-
- GEM_BUG_ON(!i915_request_completed(rq));
+ GEM_BUG_ON(!__i915_request_is_complete(rq));
do {
tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
@@ -488,6 +487,8 @@ void __i915_request_skip(struct i915_request *rq)
if (rq->infix == rq->postfix)
return;
+ RQ_TRACE(rq, "error: %d\n", rq->fence.error);
+
/*
* As this request likely depends on state from the lost
* context, clear out all the user operations leaving the
@@ -513,6 +514,17 @@ void i915_request_set_error_once(struct i915_request *rq, int error)
} while (!try_cmpxchg(&rq->fence.error, &old, error));
}
+void i915_request_mark_eio(struct i915_request *rq)
+{
+ if (__i915_request_is_complete(rq))
+ return;
+
+ GEM_BUG_ON(i915_request_signaled(rq));
+
+ i915_request_set_error_once(rq, -EIO);
+ i915_request_mark_complete(rq);
+}
+
bool __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
@@ -539,12 +551,10 @@ bool __i915_request_submit(struct i915_request *request)
* dropped upon retiring. (Otherwise if resubmit a *retired*
* request, this would be a horrible use-after-free.)
*/
- if (i915_request_completed(request))
- goto xfer;
-
- if (unlikely(intel_context_is_closed(request->context) &&
- !intel_engine_has_heartbeat(engine)))
- intel_context_set_banned(request->context);
+ if (__i915_request_is_complete(request)) {
+ list_del_init(&request->sched.link);
+ goto active;
+ }
if (unlikely(intel_context_is_banned(request->context)))
i915_request_set_error_once(request, -EIO);
@@ -579,11 +589,11 @@ bool __i915_request_submit(struct i915_request *request)
engine->serial++;
result = true;
-xfer:
- if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
- list_move_tail(&request->sched.link, &engine->active.requests);
- clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
- }
+ GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+ list_move_tail(&request->sched.link, &engine->active.requests);
+active:
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
+ set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
/*
* XXX Rollback bonded-execution on __i915_request_unsubmit()?
@@ -643,7 +653,7 @@ void __i915_request_unsubmit(struct i915_request *request)
i915_request_cancel_breadcrumb(request);
/* We've already spun, don't charge on resubmitting. */
- if (request->sched.semaphores && i915_request_started(request))
+ if (request->sched.semaphores && __i915_request_has_started(request))
request->sched.semaphores = 0;
/*
@@ -855,7 +865,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
RCU_INIT_POINTER(rq->timeline, tl);
RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
rq->hwsp_seqno = tl->hwsp_seqno;
- GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(__i915_request_is_complete(rq));
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
@@ -961,15 +971,22 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
if (i915_request_started(signal))
return 0;
+ /*
+ * The caller holds a reference on @signal, but we do not serialise
+ * against it being retired and removed from the lists.
+ *
+ * We do not hold a reference to the request before @signal, and
+ * so must be very careful to ensure that it is not _recycled_ as
+ * we follow the link backwards.
+ */
fence = NULL;
rcu_read_lock();
- spin_lock_irq(&signal->lock);
do {
struct list_head *pos = READ_ONCE(signal->link.prev);
struct i915_request *prev;
/* Confirm signal has not been retired, the link is valid */
- if (unlikely(i915_request_started(signal)))
+ if (unlikely(__i915_request_has_started(signal)))
break;
/* Is signal the earliest request on its timeline? */
@@ -994,7 +1011,6 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
fence = &prev->fence;
} while (0);
- spin_unlock_irq(&signal->lock);
rcu_read_unlock();
if (!fence)
return 0;
@@ -1511,7 +1527,7 @@ __i915_request_add_to_timeline(struct i915_request *rq)
*/
prev = to_request(__i915_active_fence_set(&timeline->last_request,
&rq->fence));
- if (prev && !i915_request_completed(prev)) {
+ if (prev && !__i915_request_is_complete(prev)) {
/*
* The requests are supposed to be kept in order. However,
* we need to be wary in case the timeline->last_request
@@ -1582,6 +1598,12 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
return __i915_request_add_to_timeline(rq);
}
+void __i915_request_queue_bh(struct i915_request *rq)
+{
+ i915_sw_fence_commit(&rq->semaphore);
+ i915_sw_fence_commit(&rq->submit);
+}
+
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr)
{
@@ -1598,8 +1620,10 @@ void __i915_request_queue(struct i915_request *rq,
*/
if (attr && rq->engine->schedule)
rq->engine->schedule(rq, attr);
- i915_sw_fence_commit(&rq->semaphore);
- i915_sw_fence_commit(&rq->submit);
+
+ local_bh_disable();
+ __i915_request_queue_bh(rq);
+ local_bh_enable(); /* kick tasklets */
}
void i915_request_add(struct i915_request *rq)
@@ -1823,7 +1847,7 @@ long i915_request_wait(struct i915_request *rq,
* for unhappy HW.
*/
if (i915_request_is_ready(rq))
- intel_engine_flush_submission(rq->engine);
+ __intel_engine_flush_submission(rq->engine, false);
for (;;) {
set_current_state(state);
@@ -1855,6 +1879,106 @@ out:
return timeout;
}
+static int print_sched_attr(const struct i915_sched_attr *attr,
+ char *buf, int x, int len)
+{
+ if (attr->priority == I915_PRIORITY_INVALID)
+ return x;
+
+ x += snprintf(buf + x, len - x,
+ " prio=%d", attr->priority);
+
+ return x;
+}
+
+static char queue_status(const struct i915_request *rq)
+{
+ if (i915_request_is_active(rq))
+ return 'E';
+
+ if (i915_request_is_ready(rq))
+ return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
+
+ return 'U';
+}
+
+static const char *run_status(const struct i915_request *rq)
+{
+ if (__i915_request_is_complete(rq))
+ return "!";
+
+ if (__i915_request_has_started(rq))
+ return "*";
+
+ if (!i915_sw_fence_signaled(&rq->semaphore))
+ return "&";
+
+ return "";
+}
+
+static const char *fence_status(const struct i915_request *rq)
+{
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ return "+";
+
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
+ return "-";
+
+ return "";
+}
+
+void i915_request_show(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent)
+{
+ const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
+ char buf[80] = "";
+ int x = 0;
+
+ /*
+ * The prefix is used to show the queue status, for which we use
+ * the following flags:
+ *
+ * U [Unready]
+ * - initial status upon being submitted by the user
+ *
+ * - the request is not ready for execution as it is waiting
+ * for external fences
+ *
+ * R [Ready]
+ * - all fences the request was waiting on have been signaled,
+ * and the request is now ready for execution and will be
+ * in a backend queue
+ *
+ * - a ready request may still need to wait on semaphores
+ * [internal fences]
+ *
+ * V [Ready/virtual]
+ * - same as ready, but queued over multiple backends
+ *
+ * E [Executing]
+ * - the request has been transferred from the backend queue and
+ * submitted for execution on HW
+ *
+ * - a completed request may still be regarded as executing; its
+ * status may not be updated until it is retired and removed
+ * from the lists
+ */
+
+ x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
+
+ drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
+ prefix, indent, " ",
+ queue_status(rq),
+ rq->fence.context, rq->fence.seqno,
+ run_status(rq),
+ fence_status(rq),
+ buf,
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+ name);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 620b6fab2c5c..1bfe214a47e9 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -43,6 +43,7 @@
struct drm_file;
struct drm_i915_gem_object;
+struct drm_printer;
struct i915_request;
struct i915_capture_list {
@@ -308,12 +309,14 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
-void i915_request_set_error_once(struct i915_request *rq, int error);
void __i915_request_skip(struct i915_request *rq);
+void i915_request_set_error_once(struct i915_request *rq, int error);
+void i915_request_mark_eio(struct i915_request *rq);
struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr);
+void __i915_request_queue_bh(struct i915_request *rq);
bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);
@@ -371,6 +374,11 @@ long i915_request_wait(struct i915_request *rq,
#define I915_WAIT_PRIORITY BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
+void i915_request_show(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent);
+
static inline bool i915_request_signaled(const struct i915_request *rq)
{
/* The request may live longer than its HWSP, so check flags first! */
@@ -434,7 +442,7 @@ static inline u32 hwsp_seqno(const struct i915_request *rq)
static inline bool __i915_request_has_started(const struct i915_request *rq)
{
- return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
+ return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
}
/**
@@ -465,11 +473,19 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
*/
static inline bool i915_request_started(const struct i915_request *rq)
{
+ bool result;
+
if (i915_request_signaled(rq))
return true;
- /* Remember: started but may have since been preempted! */
- return __i915_request_has_started(rq);
+ result = true;
+ rcu_read_lock(); /* the HWSP may be freed at runtime */
+ if (likely(!i915_request_signaled(rq)))
+ /* Remember: started but may have since been preempted! */
+ result = __i915_request_has_started(rq);
+ rcu_read_unlock();
+
+ return result;
}
/**
@@ -482,10 +498,16 @@ static inline bool i915_request_started(const struct i915_request *rq)
*/
static inline bool i915_request_is_running(const struct i915_request *rq)
{
+ bool result;
+
if (!i915_request_is_active(rq))
return false;
- return __i915_request_has_started(rq);
+ rcu_read_lock();
+ result = __i915_request_has_started(rq) && i915_request_is_active(rq);
+ rcu_read_unlock();
+
+ return result;
}
/**
@@ -509,12 +531,25 @@ static inline bool i915_request_is_ready(const struct i915_request *rq)
return !list_empty(&rq->sched.link);
}
+static inline bool __i915_request_is_complete(const struct i915_request *rq)
+{
+ return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
+}
+
static inline bool i915_request_completed(const struct i915_request *rq)
{
+ bool result;
+
if (i915_request_signaled(rq))
return true;
- return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
+ result = true;
+ rcu_read_lock(); /* the HWSP may be freed at runtime */
+ if (likely(!i915_request_signaled(rq)))
+ result = __i915_request_is_complete(rq);
+ rcu_read_unlock();
+
+ return result;
}
static inline void i915_request_mark_complete(struct i915_request *rq)
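The new __i915_request_is_complete() and __i915_request_has_started() helpers reduce to a wraparound-safe seqno comparison against the HWSP value (the RCU section in the public wrappers only protects the HWSP page itself). A standalone sketch of that comparison, assuming it matches the driver's usual signed-difference idiom for i915_seqno_passed():

/*
 * Wraparound-safe "has seqno a passed seqno b" check.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seqno_passed(uint32_t hwsp, uint32_t target)
{
	return (int32_t)(hwsp - target) >= 0;
}

int main(void)
{
	/* HWSP has advanced past the request's seqno: complete */
	printf("%d\n", seqno_passed(105, 100));       /* 1 */
	/* still correct across the u32 wraparound point */
	printf("%d\n", seqno_passed(3, 0xfffffffe));  /* 1 */
	/* not yet reached */
	printf("%d\n", seqno_passed(99, 100));        /* 0 */
	return 0;
}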
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index cbb880b10c65..7144239f08df 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -458,14 +458,10 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
if (!dep)
return -ENOMEM;
- local_bh_disable();
-
if (!__i915_sched_node_add_dependency(node, signal, dep,
flags | I915_DEPENDENCY_ALLOC))
i915_dependency_free(dep);
- local_bh_enable(); /* kick submission tasklet */
-
return 0;
}
@@ -504,6 +500,34 @@ void i915_sched_node_fini(struct i915_sched_node *node)
spin_unlock_irq(&schedule_lock);
}
+void i915_request_show_with_schedule(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent)
+{
+ struct i915_dependency *dep;
+
+ i915_request_show(m, rq, prefix, indent);
+ if (i915_request_completed(rq))
+ return;
+
+ rcu_read_lock();
+ for_each_signaler(dep, rq) {
+ const struct i915_request *signaler =
+ node_to_request(dep->signaler);
+
+ /* Dependencies along the same timeline are expected. */
+ if (signaler->timeline == rq->timeline)
+ continue;
+
+ if (__i915_request_is_complete(signaler))
+ continue;
+
+ i915_request_show(m, signaler, prefix, indent + 2);
+ }
+ rcu_read_unlock();
+}
+
static void i915_global_scheduler_shrink(void)
{
kmem_cache_shrink(global.slab_dependencies);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 6f0bf00fc569..4501e5ac2637 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -13,6 +13,8 @@
#include "i915_scheduler_types.h"
+struct drm_printer;
+
#define priolist_for_each_request(it, plist, idx) \
for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
list_for_each_entry(it, &(plist)->requests[idx], sched.link)
@@ -54,4 +56,9 @@ static inline void i915_priolist_free(struct i915_priolist *p)
__i915_priolist_free(p);
}
+void i915_request_show_with_schedule(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent);
+
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index f72e6c397b08..343ed44d5ed4 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -81,4 +81,14 @@ struct i915_dependency {
#define I915_DEPENDENCY_WEAK BIT(2)
};
+#define for_each_waiter(p__, rq__) \
+ list_for_each_entry_lockless(p__, \
+ &(rq__)->sched.waiters_list, \
+ wait_link)
+
+#define for_each_signaler(p__, rq__) \
+ list_for_each_entry_rcu(p__, \
+ &(rq__)->sched.signalers_list, \
+ signal_link)
+
#endif /* _I915_SCHEDULER_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index db2111fc809e..63212df33c9e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,6 +24,7 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include "display/intel_de.h"
#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
#include "display/intel_vga.h"
@@ -39,21 +40,21 @@ static void intel_save_swf(struct drm_i915_private *dev_priv)
/* Scratch space */
if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
- dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
- dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+ dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i));
+ dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
}
for (i = 0; i < 3; i++)
- dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
+ dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i));
} else if (IS_GEN(dev_priv, 2)) {
for (i = 0; i < 7; i++)
- dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+ dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
} else if (HAS_GMCH(dev_priv)) {
for (i = 0; i < 16; i++) {
- dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
- dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+ dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i));
+ dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
}
for (i = 0; i < 3; i++)
- dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
+ dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i));
}
}
@@ -64,21 +65,21 @@ static void intel_restore_swf(struct drm_i915_private *dev_priv)
/* Scratch space */
if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
- I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
- I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+ intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]);
+ intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
- I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
+ intel_de_write(dev_priv, SWF3(i), dev_priv->regfile.saveSWF3[i]);
} else if (IS_GEN(dev_priv, 2)) {
for (i = 0; i < 7; i++)
- I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+ intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
} else if (HAS_GMCH(dev_priv)) {
for (i = 0; i < 16; i++) {
- I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
- I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+ intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]);
+ intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
- I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
+ intel_de_write(dev_priv, SWF3(i), dev_priv->regfile.saveSWF3[i]);
}
}
@@ -88,7 +89,7 @@ void i915_save_display(struct drm_i915_private *dev_priv)
/* Display arbitration control */
if (INTEL_GEN(dev_priv) <= 4)
- dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
+ dev_priv->regfile.saveDSPARB = intel_de_read(dev_priv, DSPARB);
if (IS_GEN(dev_priv, 4))
pci_read_config_word(pdev, GCDGMBUS,
@@ -109,7 +110,7 @@ void i915_restore_display(struct drm_i915_private *dev_priv)
/* Display arbitration */
if (INTEL_GEN(dev_priv) <= 4)
- I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+ intel_de_write(dev_priv, DSPARB, dev_priv->regfile.saveDSPARB);
/* only restore FBC info on the platform that supports FBC*/
intel_fbc_global_disable(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 038d4c6884c5..2744558f3050 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -18,10 +18,15 @@
#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
-#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
-
static DEFINE_SPINLOCK(i915_sw_fence_lock);
+#define WQ_FLAG_BITS \
+ BITS_PER_TYPE(typeof_member(struct wait_queue_entry, flags))
+
+/* after WQ_FLAG_* for safety */
+#define I915_SW_FENCE_FLAG_FENCE BIT(WQ_FLAG_BITS - 1)
+#define I915_SW_FENCE_FLAG_ALLOC BIT(WQ_FLAG_BITS - 2)
+
enum {
DEBUG_FENCE_IDLE = 0,
DEBUG_FENCE_NOTIFY,
@@ -154,10 +159,10 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
if (continuation) {
list_for_each_entry_safe(pos, next, &x->head, entry) {
- if (pos->func == autoremove_wake_function)
- pos->func(pos, TASK_NORMAL, 0, continuation);
- else
+ if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
list_move_tail(&pos->entry, continuation);
+ else
+ pos->func(pos, TASK_NORMAL, 0, continuation);
}
} else {
LIST_HEAD(extra);
@@ -166,9 +171,9 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
list_for_each_entry_safe(pos, next, &x->head, entry) {
int wake_flags;
- wake_flags = fence->error;
- if (pos->func == autoremove_wake_function)
- wake_flags = 0;
+ wake_flags = 0;
+ if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
+ wake_flags = fence->error;
pos->func(pos, TASK_NORMAL, wake_flags, &extra);
}
@@ -332,8 +337,8 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *signaler,
wait_queue_entry_t *wq, gfp_t gfp)
{
+ unsigned int pending;
unsigned long flags;
- int pending;
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
@@ -349,7 +354,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
return -EINVAL;
- pending = 0;
+ pending = I915_SW_FENCE_FLAG_FENCE;
if (!wq) {
wq = kmalloc(sizeof(*wq), gfp);
if (!wq) {
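The i915_sw_fence rework above stops keying on autoremove_wake_function and instead tags i915's own wait entries with private flag bits taken from the top of the wait_queue_entry flags word, keeping clear of the kernel's low WQ_FLAG_* bits. A small sketch of that bit layout, assuming a 32-bit flags field for illustration:

/*
 * Private flags carved out of the top of a 32-bit flags word.
 */
#include <limits.h>
#include <stdio.h>

#define WQ_FLAG_BITS (sizeof(unsigned int) * CHAR_BIT)
#define FLAG_FENCE   (1u << (WQ_FLAG_BITS - 1))
#define FLAG_ALLOC   (1u << (WQ_FLAG_BITS - 2))

int main(void)
{
	printf("FENCE bit: 0x%08x\n", FLAG_FENCE); /* 0x80000000 */
	printf("ALLOC bit: 0x%08x\n", FLAG_ALLOC); /* 0x40000000 */
	return 0;
}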
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 4c305d838016..f9e780dee9de 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -87,7 +87,7 @@ bool i915_error_injected(void)
void cancel_timer(struct timer_list *t)
{
- if (!READ_ONCE(t->expires))
+ if (!timer_active(t))
return;
del_timer(t);
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 54773371e6bd..abd4dcd9f79c 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -438,9 +438,14 @@ static inline void __add_taint_for_CI(unsigned int taint)
void cancel_timer(struct timer_list *t);
void set_timer_ms(struct timer_list *t, unsigned long timeout);
+static inline bool timer_active(const struct timer_list *t)
+{
+ return READ_ONCE(t->expires);
+}
+
static inline bool timer_expired(const struct timer_list *t)
{
- return READ_ONCE(t->expires) && !timer_pending(t);
+ return timer_active(t) && !timer_pending(t);
}
/*
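With the change above, timer_expired() is expressed in terms of the new timer_active() helper: a timer has expired only if it was armed at some point (non-zero expires) and is no longer pending. A plain-C stand-in showing the relation, not the kernel timer types:

/*
 * Toy timer mirroring the active/expired split.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_timer {
	unsigned long expires; /* 0 means never armed / cancelled */
	bool pending;          /* still queued to fire */
};

static bool timer_active(const struct toy_timer *t)
{
	return t->expires != 0;
}

static bool timer_expired(const struct toy_timer *t)
{
	return timer_active(t) && !t->pending;
}

int main(void)
{
	struct toy_timer t = { .expires = 100, .pending = false };

	printf("active=%d expired=%d\n", timer_active(&t), timer_expired(&t));
	return 0;
}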
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 5b3a3c653454..a64adc8c883b 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -363,6 +363,21 @@ i915_vma_unpin_fence(struct i915_vma *vma)
void i915_vma_parked(struct intel_gt *gt);
+static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
+{
+ return test_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
+static inline void i915_vma_mark_scanout(struct i915_vma *vma)
+{
+ set_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
+static inline void i915_vma_clear_scanout(struct i915_vma *vma)
+{
+ clear_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
#define for_each_until(cond) if (cond) break; else
/**
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 9e9082dc8f4b..f5cb848b7a7e 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -249,6 +249,9 @@ struct i915_vma {
#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT))
#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT))
+#define I915_VMA_SCANOUT_BIT 18
+#define I915_VMA_SCANOUT ((int)BIT(I915_VMA_SCANOUT_BIT))
+
struct i915_active active;
#define I915_VMA_PAGES_BIAS 24
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index e67cec8fa2aa..f2d5ae59081e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -104,7 +104,7 @@ void intel_device_info_print_static(const struct intel_device_info *info,
drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);
-#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name))
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
@@ -117,150 +117,6 @@ void intel_device_info_print_runtime(const struct intel_runtime_info *info,
struct drm_printer *p)
{
drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
- drm_printf(p, "CS timestamp frequency: %u Hz\n",
- info->cs_timestamp_frequency_hz);
-}
-
-static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
-{
- u32 ts_override = intel_uncore_read(&dev_priv->uncore,
- GEN9_TIMESTAMP_OVERRIDE);
- u32 base_freq, frac_freq;
-
- base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
- GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
- base_freq *= 1000000;
-
- frac_freq = ((ts_override &
- GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
- GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
- frac_freq = 1000000 / (frac_freq + 1);
-
- return base_freq + frac_freq;
-}
-
-static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
- u32 rpm_config_reg)
-{
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
- u32 crystal_clock = (rpm_config_reg &
- GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
- GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
- switch (crystal_clock) {
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
- return f19_2_mhz;
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
- return f24_mhz;
- default:
- MISSING_CASE(crystal_clock);
- return 0;
- }
-}
-
-static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
- u32 rpm_config_reg)
-{
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
- u32 f25_mhz = 25000000;
- u32 f38_4_mhz = 38400000;
- u32 crystal_clock = (rpm_config_reg &
- GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
- GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
- switch (crystal_clock) {
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
- return f24_mhz;
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
- return f19_2_mhz;
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
- return f38_4_mhz;
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
- return f25_mhz;
- default:
- MISSING_CASE(crystal_clock);
- return 0;
- }
-}
-
-static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 f12_5_mhz = 12500000;
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
-
- if (INTEL_GEN(dev_priv) <= 4) {
- /* PRMs say:
- *
- * "The value in this register increments once every 16
- * hclks." (through the “Clocking Configuration”
- * (“CLKCFG”) MCHBAR register)
- */
- return RUNTIME_INFO(dev_priv)->rawclk_freq * 1000 / 16;
- } else if (INTEL_GEN(dev_priv) <= 8) {
- /* PRMs say:
- *
- * "The PCU TSC counts 10ns increments; this timestamp
- * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
- * rolling over every 1.5 hours).
- */
- return f12_5_mhz;
- } else if (INTEL_GEN(dev_priv) <= 9) {
- u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
- u32 freq = 0;
-
- if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(dev_priv);
- } else {
- freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;
-
- /* Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
- CTC_SHIFT_PARAMETER_SHIFT);
- }
-
- return freq;
- } else if (INTEL_GEN(dev_priv) <= 12) {
- u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
- u32 freq = 0;
-
- /* First figure out the reference frequency. There are 2 ways
- * we can compute the frequency, either through the
- * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
- * tells us which one we should use.
- */
- if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(dev_priv);
- } else {
- u32 rpm_config_reg = intel_uncore_read(uncore, RPM_CONFIG0);
-
- if (INTEL_GEN(dev_priv) <= 10)
- freq = gen10_get_crystal_clock_freq(dev_priv,
- rpm_config_reg);
- else
- freq = gen11_get_crystal_clock_freq(dev_priv,
- rpm_config_reg);
-
- /* Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - ((rpm_config_reg &
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
- }
-
- return freq;
- }
-
- MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
- return 0;
}
#undef INTEL_VGA_DEVICE
@@ -505,19 +361,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->rawclk_freq = intel_read_rawclk(dev_priv);
drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
- /* Initialize command stream timestamp frequency */
- runtime->cs_timestamp_frequency_hz =
- read_timestamp_frequency(dev_priv);
- if (runtime->cs_timestamp_frequency_hz) {
- runtime->cs_timestamp_period_ns =
- i915_cs_timestamp_ticks_to_ns(dev_priv, 1);
- drm_dbg(&dev_priv->drm,
- "CS timestamp wraparound in %lldms\n",
- div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns,
- S32_MAX),
- USEC_PER_SEC));
- }
-
if (!HAS_DISPLAY(dev_priv)) {
dev_priv->drm.driver_features &= ~(DRIVER_MODESET |
DRIVER_ATOMIC);
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index d92fa041c700..cf2d528c6e9b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -123,7 +123,6 @@ enum intel_ppgtt_type {
func(has_llc); \
func(has_logical_ring_contexts); \
func(has_logical_ring_elsq); \
- func(has_logical_ring_preemption); \
func(has_master_unit_irq); \
func(has_pooled_eu); \
func(has_rc6); \
@@ -224,9 +223,6 @@ struct intel_runtime_info {
u8 num_scalers[I915_MAX_PIPES];
u32 rawclk_freq;
-
- u32 cs_timestamp_frequency_hz;
- u32 cs_timestamp_period_ns;
};
struct intel_driver_caps {
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 4754296a250e..73d256fc6830 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "intel_dram.h"
+#include "intel_sideband.h"
struct dram_dimm_info {
u16 size;
@@ -201,22 +202,12 @@ skl_dram_get_channels_info(struct drm_i915_private *i915)
return -EINVAL;
}
- /*
- * If any of the channel is single rank channel, worst case output
- * will be same as if single rank memory, so consider single rank
- * memory.
- */
- if (ch0.ranks == 1 || ch1.ranks == 1)
- dram_info->ranks = 1;
- else
- dram_info->ranks = max(ch0.ranks, ch1.ranks);
-
- if (dram_info->ranks == 0) {
+ if (ch0.ranks == 0 && ch1.ranks == 0) {
drm_info(&i915->drm, "couldn't get memory rank information\n");
return -EINVAL;
}
- dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+ dram_info->wm_lv_0_adjust_needed = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
@@ -269,16 +260,12 @@ skl_get_dram_info(struct drm_i915_private *i915)
mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
- dram_info->bandwidth_kbps = dram_info->num_channels *
- mem_freq_khz * 8;
-
- if (dram_info->bandwidth_kbps == 0) {
+ if (dram_info->num_channels * mem_freq_khz == 0) {
drm_info(&i915->drm,
"Couldn't get system memory bandwidth\n");
return -EINVAL;
}
- dram_info->valid = true;
return 0;
}
@@ -365,7 +352,7 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
struct dram_info *dram_info = &i915->dram_info;
u32 dram_channels;
u32 mem_freq_khz, val;
- u8 num_active_channels;
+ u8 num_active_channels, valid_ranks = 0;
int i;
val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
@@ -375,10 +362,7 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
num_active_channels = hweight32(dram_channels);
- /* Each active bit represents 4-byte channel */
- dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
-
- if (dram_info->bandwidth_kbps == 0) {
+ if (mem_freq_khz * num_active_channels == 0) {
drm_info(&i915->drm,
"Couldn't get system memory bandwidth\n");
return -EINVAL;
@@ -410,57 +394,125 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
dimm.size, dimm.width, dimm.ranks,
intel_dram_type_str(type));
- /*
- * If any of the channel is single rank channel,
- * worst case output will be same as if single rank
- * memory, so consider single rank memory.
- */
- if (dram_info->ranks == 0)
- dram_info->ranks = dimm.ranks;
- else if (dimm.ranks == 1)
- dram_info->ranks = 1;
+ if (valid_ranks == 0)
+ valid_ranks = dimm.ranks;
if (type != INTEL_DRAM_UNKNOWN)
dram_info->type = type;
}
- if (dram_info->type == INTEL_DRAM_UNKNOWN || dram_info->ranks == 0) {
+ if (dram_info->type == INTEL_DRAM_UNKNOWN || valid_ranks == 0) {
drm_info(&i915->drm, "couldn't get memory information\n");
return -EINVAL;
}
- dram_info->valid = true;
+ return 0;
+}
+
+static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
+{
+ struct dram_info *dram_info = &dev_priv->dram_info;
+ u32 val = 0;
+ int ret;
+
+ ret = sandybridge_pcode_read(dev_priv,
+ ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
+ &val, NULL);
+ if (ret)
+ return ret;
+
+ if (IS_GEN(dev_priv, 12)) {
+ switch (val & 0xf) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ case 4:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 5:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ return -1;
+ }
+ } else {
+ switch (val & 0xf) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 2:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ return -1;
+ }
+ }
+
+ dram_info->num_channels = (val & 0xf0) >> 4;
+ dram_info->num_qgv_points = (val & 0xf00) >> 8;
return 0;
}
+static int gen11_get_dram_info(struct drm_i915_private *i915)
+{
+ int ret = skl_get_dram_info(i915);
+
+ if (ret)
+ return ret;
+
+ return icl_pcode_read_mem_global_info(i915);
+}
+
+static int gen12_get_dram_info(struct drm_i915_private *i915)
+{
+ /* Always needed for GEN12+ */
+ i915->dram_info.wm_lv_0_adjust_needed = true;
+
+ return icl_pcode_read_mem_global_info(i915);
+}
+
void intel_dram_detect(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
int ret;
/*
- * Assume 16Gb DIMMs are present until proven otherwise.
- * This is only used for the level 0 watermark latency
- * w/a which does not apply to bxt/glk.
+ * Assume level 0 watermark latency adjustment is needed until proven
+ * otherwise; this w/a is not needed by bxt/glk.
*/
- dram_info->is_16gb_dimm = !IS_GEN9_LP(i915);
+ dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);
if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915))
return;
- if (IS_GEN9_LP(i915))
+ if (INTEL_GEN(i915) >= 12)
+ ret = gen12_get_dram_info(i915);
+ else if (INTEL_GEN(i915) >= 11)
+ ret = gen11_get_dram_info(i915);
+ else if (IS_GEN9_LP(i915))
ret = bxt_get_dram_info(i915);
else
ret = skl_get_dram_info(i915);
if (ret)
return;
- drm_dbg_kms(&i915->drm, "DRAM bandwidth: %u kBps, channels: %u\n",
- dram_info->bandwidth_kbps, dram_info->num_channels);
+ drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
- drm_dbg_kms(&i915->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n",
- dram_info->ranks, yesno(dram_info->is_16gb_dimm));
+ drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
+ yesno(dram_info->wm_lv_0_adjust_needed));
}
static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
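icl_pcode_read_mem_global_info() above unpacks the pcode reply word as follows: DRAM type in bits 3:0 (with a gen12-specific encoding), channel count in bits 7:4 and QGV point count in bits 11:8. A decoding sketch using a made-up sample value, just to show the field extraction:

/*
 * Unpack the pcode "memory subsystem global info" word.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = 0x342;             /* hypothetical pcode reply */
	unsigned type = val & 0xf;        /* DRAM type enum, gen-dependent */
	unsigned channels = (val & 0xf0) >> 4;
	unsigned qgv_points = (val & 0xf00) >> 8;

	printf("type=%u channels=%u qgv_points=%u\n",
	       type, channels, qgv_points);
	return 0;
}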
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index b326993a1026..1bfcdd89b241 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -10,7 +10,7 @@
#define REGION_MAP(type, inst) \
BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst)
-const u32 intel_region_map[] = {
+static const u32 intel_region_map[] = {
[INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
[INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
[INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 232490d89a83..6ffc0673f005 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -51,21 +51,16 @@ enum intel_region_id {
for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \
for_each_if((mr) = (i915)->mm.regions[id])
-/**
- * Memory regions encoded as type | instance
- */
-extern const u32 intel_region_map[];
-
struct intel_memory_region_ops {
unsigned int flags;
int (*init)(struct intel_memory_region *mem);
void (*release)(struct intel_memory_region *mem);
- struct drm_i915_gem_object *
- (*create_object)(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags);
+ int (*init_object)(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags);
};
struct intel_memory_region {
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index f31c0dabd0cc..ecaf314d60b6 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -143,8 +143,9 @@ static bool intel_is_virt_pch(unsigned short id,
sdevice == PCI_SUBDEVICE_ID_QEMU));
}
-static unsigned short
-intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
+static void
+intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
+ unsigned short *pch_id, enum intel_pch *pch_type)
{
unsigned short id = 0;
@@ -181,12 +182,21 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
else
drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n");
- return id;
+ *pch_type = intel_pch_type(dev_priv, id);
+
+ /* Sanity check virtual PCH id */
+ if (drm_WARN_ON(&dev_priv->drm,
+ id && *pch_type == PCH_NONE))
+ id = 0;
+
+ *pch_id = id;
}
void intel_detect_pch(struct drm_i915_private *dev_priv)
{
struct pci_dev *pch = NULL;
+ unsigned short id;
+ enum intel_pch pch_type;
/* DG1 has south engine display on the same PCI device */
if (IS_DG1(dev_priv)) {
@@ -206,9 +216,6 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
* of only checking the first one.
*/
while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
- unsigned short id;
- enum intel_pch pch_type;
-
if (pch->vendor != PCI_VENDOR_ID_INTEL)
continue;
@@ -221,14 +228,7 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
break;
} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
pch->subsystem_device)) {
- id = intel_virt_detect_pch(dev_priv);
- pch_type = intel_pch_type(dev_priv, id);
-
- /* Sanity check virtual PCH id */
- if (drm_WARN_ON(&dev_priv->drm,
- id && pch_type == PCH_NONE))
- id = 0;
-
+ intel_virt_detect_pch(dev_priv, &id, &pch_type);
dev_priv->pch_type = pch_type;
dev_priv->pch_id = id;
break;
@@ -244,10 +244,15 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
"Display disabled, reverting to NOP PCH\n");
dev_priv->pch_type = PCH_NOP;
dev_priv->pch_id = 0;
+ } else if (!pch) {
+ if (run_as_guest() && HAS_DISPLAY(dev_priv)) {
+ intel_virt_detect_pch(dev_priv, &id, &pch_type);
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ } else {
+ drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
+ }
}
- if (!pch)
- drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
-
pci_dev_put(pch);
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a20b5051f18c..0c3e63f27c29 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -82,24 +82,24 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
* Must match Sampler, Pixel Back End, and Media. See
* WaCompressedResourceSamplerPbeMediaNewHashMode.
*/
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) |
SKL_DE_COMPRESSED_HASH_MODE);
}
/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
- I915_WRITE(GEN8_CHICKEN_DCPR_1,
- I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+ intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
+ intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
/*
* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl
* Display WA #0859: skl,bxt,kbl,glk,cfl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_MEMORY_WAKE);
}
@@ -108,21 +108,21 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WaDisableSDEUnitClockGating:bxt */
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/*
* FIXME:
* GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
*/
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
/*
* Wa: Backlight PWM may stop in the asserted state, causing backlight
* to stay fully on.
*/
- I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
/*
@@ -131,20 +131,20 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
* is off and a MMIO access is attempted by any privilege
* application, using batch buffers or any other means.
*/
- I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
+ intel_uncore_write(&dev_priv->uncore, RM_TIMEOUT, MMIO_TIMEOUT_US(950));
/*
* WaFbcTurnOffFbcWatermark:bxt
* Display WA #0562: bxt
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcHighMemBwCorruptionAvoidance:bxt
* Display WA #0883: bxt
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_DISABLE_DUMMY0);
}
@@ -157,7 +157,7 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
* Backlight PWM may stop in the asserted state, causing backlight
* to stay fully on.
*/
- I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
}
@@ -165,7 +165,7 @@ static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
u32 tmp;
- tmp = I915_READ(CLKCFG);
+ tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
switch (tmp & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_533:
@@ -195,7 +195,7 @@ static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
}
/* detect pineview DDR3 setting */
- tmp = I915_READ(CSHRDDR3CTL);
+ tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
@@ -366,39 +366,39 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
u32 val;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
- POSTING_READ(FW_BLC_SELF_VLV);
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
- was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
- I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
- POSTING_READ(FW_BLC_SELF);
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
} else if (IS_PINEVIEW(dev_priv)) {
- val = I915_READ(DSPFW3);
+ val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
if (enable)
val |= PINEVIEW_SELF_REFRESH_EN;
else
val &= ~PINEVIEW_SELF_REFRESH_EN;
- I915_WRITE(DSPFW3, val);
- POSTING_READ(DSPFW3);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
- was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
- I915_WRITE(FW_BLC_SELF, val);
- POSTING_READ(FW_BLC_SELF);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
} else if (IS_I915GM(dev_priv)) {
/*
* FIXME can't find a bit like this for 915G,
* and yet it does have the related watermark in
* FW_BLC_SELF. What's going on?
*/
- was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+ was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
- I915_WRITE(INSTPM, val);
- POSTING_READ(INSTPM);
+ intel_uncore_write(&dev_priv->uncore, INSTPM, val);
+ intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
} else {
return false;
}
@@ -494,20 +494,20 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
switch (pipe) {
case PIPE_A:
- dsparb = I915_READ(DSPARB);
- dsparb2 = I915_READ(DSPARB2);
+ dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
break;
case PIPE_B:
- dsparb = I915_READ(DSPARB);
- dsparb2 = I915_READ(DSPARB2);
+ dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
break;
case PIPE_C:
- dsparb2 = I915_READ(DSPARB2);
- dsparb3 = I915_READ(DSPARB3);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
break;
@@ -525,7 +525,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = I915_READ(DSPARB);
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
int size;
size = dsparb & 0x7f;
@@ -541,7 +541,7 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = I915_READ(DSPARB);
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
int size;
size = dsparb & 0x1ff;
@@ -558,7 +558,7 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = I915_READ(DSPARB);
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
int size;
size = dsparb & 0x7f;
@@ -911,38 +911,38 @@ static void pnv_update_wm(struct intel_crtc *unused_crtc)
wm = intel_calculate_wm(clock, &pnv_display_wm,
pnv_display_wm.fifo_size,
cpp, latency->display_sr);
- reg = I915_READ(DSPFW1);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
- I915_WRITE(DSPFW1, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
/* cursor SR */
wm = intel_calculate_wm(clock, &pnv_cursor_wm,
pnv_display_wm.fifo_size,
4, latency->cursor_sr);
- reg = I915_READ(DSPFW3);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
reg |= FW_WM(wm, CURSOR_SR);
- I915_WRITE(DSPFW3, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
/* Display HPLL off SR */
wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
- reg = I915_READ(DSPFW3);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
reg |= FW_WM(wm, HPLL_SR);
- I915_WRITE(DSPFW3, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
/* cursor HPLL off SR */
wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
4, latency->cursor_hpll_disable);
- reg = I915_READ(DSPFW3);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
- I915_WRITE(DSPFW3, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
intel_set_memory_cxsr(dev_priv, true);
@@ -976,25 +976,25 @@ static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe)
trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
- I915_WRITE(DSPFW1,
+ intel_uncore_write(&dev_priv->uncore, DSPFW1,
FW_WM(wm->sr.plane, SR) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- I915_WRITE(DSPFW2,
+ intel_uncore_write(&dev_priv->uncore, DSPFW2,
(wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
FW_WM(wm->sr.fbc, FBC_SR) |
FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- I915_WRITE(DSPFW3,
+ intel_uncore_write(&dev_priv->uncore, DSPFW3,
(wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
FW_WM(wm->sr.cursor, CURSOR_SR) |
FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
FW_WM(wm->hpll.plane, HPLL_SR));
- POSTING_READ(DSPFW1);
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
}
#define FW_WM_VLV(value, plane) \
@@ -1008,7 +1008,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
- I915_WRITE(VLV_DDL(pipe),
+ intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
(wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
(wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
(wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
@@ -1020,35 +1020,35 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
* high order bits so that there are no out of bounds values
* present in the registers during the reprogramming.
*/
- I915_WRITE(DSPHOWM, 0);
- I915_WRITE(DSPHOWM1, 0);
- I915_WRITE(DSPFW4, 0);
- I915_WRITE(DSPFW5, 0);
- I915_WRITE(DSPFW6, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
- I915_WRITE(DSPFW1,
+ intel_uncore_write(&dev_priv->uncore, DSPFW1,
FW_WM(wm->sr.plane, SR) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- I915_WRITE(DSPFW2,
+ intel_uncore_write(&dev_priv->uncore, DSPFW2,
FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- I915_WRITE(DSPFW3,
+ intel_uncore_write(&dev_priv->uncore, DSPFW3,
FW_WM(wm->sr.cursor, CURSOR_SR));
if (IS_CHERRYVIEW(dev_priv)) {
- I915_WRITE(DSPFW7_CHV,
+ intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- I915_WRITE(DSPFW8_CHV,
+ intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
- I915_WRITE(DSPFW9_CHV,
+ intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
- I915_WRITE(DSPHOWM,
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM,
FW_WM(wm->sr.plane >> 9, SR_HI) |
FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
@@ -1060,10 +1060,10 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
} else {
- I915_WRITE(DSPFW7,
+ intel_uncore_write(&dev_priv->uncore, DSPFW7,
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- I915_WRITE(DSPHOWM,
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM,
FW_WM(wm->sr.plane >> 9, SR_HI) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
@@ -1073,7 +1073,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
}
- POSTING_READ(DSPFW1);
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
}
#undef FW_WM_VLV
@@ -2310,14 +2310,14 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
srwm);
/* 965 has limitations... */
- I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
+ intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
FW_WM(8, CURSORB) |
FW_WM(8, PLANEB) |
FW_WM(8, PLANEA));
- I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
+ intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
FW_WM(8, PLANEC_OLD));
/* update cursor SR watermark */
- I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
@@ -2447,10 +2447,10 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
srwm = 1;
if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- I915_WRITE(FW_BLC_SELF,
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else
- I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
}
drm_dbg_kms(&dev_priv->drm,
@@ -2464,8 +2464,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
fwater_hi = fwater_hi | (1 << 8);
- I915_WRITE(FW_BLC, fwater_lo);
- I915_WRITE(FW_BLC2, fwater_hi);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
if (enabled)
intel_set_memory_cxsr(dev_priv, true);
@@ -2488,13 +2488,13 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
&i845_wm_info,
dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
4, pessimal_latency_ns);
- fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+ fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
drm_dbg_kms(&dev_priv->drm,
"Setting FIFO watermarks - A: %d\n", planea_wm);
- I915_WRITE(FW_BLC, fwater_lo);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
}
/* latency must be in 0.1us units. */
@@ -2930,7 +2930,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
* any underrun. If not able to get Dimm info assume 16GB dimm
* to avoid any underrun.
*/
- if (dev_priv->dram_info.is_16gb_dimm)
+ if (dev_priv->dram_info.wm_lv_0_adjust_needed)
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3534,17 +3534,17 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
previous->wm_lp[2] &= ~WM1_LP_SR_EN;
- I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
changed = true;
}
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
previous->wm_lp[1] &= ~WM1_LP_SR_EN;
- I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
changed = true;
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
previous->wm_lp[0] &= ~WM1_LP_SR_EN;
- I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
changed = true;
}
@@ -3574,56 +3574,56 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
_ilk_disable_lp_wm(dev_priv, dirty);
if (dirty & WM_DIRTY_PIPE(PIPE_A))
- I915_WRITE(WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
if (dirty & WM_DIRTY_PIPE(PIPE_B))
- I915_WRITE(WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
if (dirty & WM_DIRTY_PIPE(PIPE_C))
- I915_WRITE(WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
if (dirty & WM_DIRTY_DDB) {
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = I915_READ(WM_MISC);
+ val = intel_uncore_read(&dev_priv->uncore, WM_MISC);
if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~WM_MISC_DATA_PARTITION_5_6;
else
val |= WM_MISC_DATA_PARTITION_5_6;
- I915_WRITE(WM_MISC, val);
+ intel_uncore_write(&dev_priv->uncore, WM_MISC, val);
} else {
- val = I915_READ(DISP_ARB_CTL2);
+ val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~DISP_DATA_PARTITION_5_6;
else
val |= DISP_DATA_PARTITION_5_6;
- I915_WRITE(DISP_ARB_CTL2, val);
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
}
}
if (dirty & WM_DIRTY_FBC) {
- val = I915_READ(DISP_ARB_CTL);
+ val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL);
if (results->enable_fbc_wm)
val &= ~DISP_FBC_WM_DIS;
else
val |= DISP_FBC_WM_DIS;
- I915_WRITE(DISP_ARB_CTL, val);
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, val);
}
if (dirty & WM_DIRTY_LP(1) &&
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
- I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
+ intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
if (INTEL_GEN(dev_priv) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
- I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
+ intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
- I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
+ intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
- I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
- I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
- I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
dev_priv->wm.hw = *results;
}
@@ -3640,7 +3640,7 @@ u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
u8 enabled_slices_mask = 0;
for (i = 0; i < max_slices; i++) {
- if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE)
+ if (intel_uncore_read(&dev_priv->uncore, DBUF_CTL_S(i)) & DBUF_POWER_STATE)
enabled_slices_mask |= BIT(i);
}
@@ -4017,43 +4017,48 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
return 0;
}
-/*
- * Calculate initial DBuf slice offset, based on slice size
- * and mask(i.e if slice size is 1024 and second slice is enabled
- * offset would be 1024)
- */
-static unsigned int
-icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
- u32 slice_size,
- u32 ddb_size)
+static int intel_dbuf_size(struct drm_i915_private *dev_priv)
{
- unsigned int offset = 0;
+ int ddb_size = INTEL_INFO(dev_priv)->ddb_size;
- if (!dbuf_slice_mask)
- return 0;
+ drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
- offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
+ if (INTEL_GEN(dev_priv) < 11)
+ return ddb_size - 4; /* 4 blocks for bypass path allocation */
- WARN_ON(offset >= ddb_size);
- return offset;
+ return ddb_size;
}
-u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
+static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
{
- u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
- drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
+ return intel_dbuf_size(dev_priv) /
+ INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+}
- if (INTEL_GEN(dev_priv) < 11)
- return ddb_size - 4; /* 4 blocks for bypass path allocation */
+static void
+skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
+ struct skl_ddb_entry *ddb)
+{
+ int slice_size = intel_dbuf_slice_size(dev_priv);
- return ddb_size;
+ if (!slice_mask) {
+ ddb->start = 0;
+ ddb->end = 0;
+ return;
+ }
+
+ ddb->start = (ffs(slice_mask) - 1) * slice_size;
+ ddb->end = fls(slice_mask) * slice_size;
+
+ WARN_ON(ddb->start >= ddb->end);
+ WARN_ON(ddb->end > intel_dbuf_size(dev_priv));
}
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
u32 slice_mask = 0;
- u16 ddb_size = intel_get_ddb_size(dev_priv);
+ u16 ddb_size = intel_dbuf_size(dev_priv);
u16 num_supported_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
u16 slice_size = ddb_size / num_supported_slices;
u16 start_slice;
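
The new skl_ddb_entry_for_slices() derives a pipe's DDB span purely from its slice mask: start is (first set slice) * slice_size, end is (last set slice) * slice_size, and an empty mask yields an empty entry. A standalone sketch of the same arithmetic, with a hypothetical two-slice mask and a made-up 1024-block slice size:

    #include <stdio.h>

    /* 1-based ffs()/fls() equivalents, so the sketch builds in userspace. */
    static int first_bit(unsigned int m) { return __builtin_ffs(m); }
    static int last_bit(unsigned int m)  { return m ? 32 - __builtin_clz(m) : 0; }

    int main(void)
    {
            unsigned int slice_mask = 0x6;   /* hypothetical: slices at bits 1 and 2 */
            unsigned int slice_size = 1024;  /* hypothetical DDB blocks per slice */

            unsigned int start = (first_bit(slice_mask) - 1) * slice_size;
            unsigned int end   = last_bit(slice_mask) * slice_size;

            printf("ddb: %u - %u\n", start, end);  /* prints "ddb: 1024 - 3072" */
            return 0;
    }
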
@@ -4077,116 +4082,40 @@ u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
return slice_mask;
}
-static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
- u8 active_pipes);
-
-static int
-skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state,
- const u64 total_data_rate,
- struct skl_ddb_entry *alloc, /* out */
- int *num_active /* out */)
-{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
- const struct intel_crtc *crtc;
- u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
- enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
- struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(intel_state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(intel_state);
- u8 active_pipes = new_dbuf_state->active_pipes;
- u16 ddb_size;
- u32 ddb_range_size;
- u32 i;
- u32 dbuf_slice_mask;
- u32 offset;
- u32 slice_size;
- u32 total_slice_mask;
- u32 start, end;
- int ret;
-
- *num_active = hweight8(active_pipes);
-
- if (!crtc_state->hw.active) {
- alloc->start = 0;
- alloc->end = 0;
- return 0;
- }
-
- ddb_size = intel_get_ddb_size(dev_priv);
-
- slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int hdisplay, vdisplay;
- /*
- * If the state doesn't change the active CRTC's or there is no
- * modeset request, then there's no need to recalculate;
- * the existing pipe allocation limits should remain unchanged.
- * Note that we're safe from racing commits since any racing commit
- * that changes the active CRTC list or do modeset would need to
- * grab _all_ crtc locks, including the one we currently hold.
- */
- if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes &&
- !dev_priv->wm.distrust_bios_wm) {
- /*
- * alloc may be cleared by clear_intel_crtc_state,
- * copy from old state to be sure
- *
- * FIXME get rid of this mess
- */
- *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
+ if (!crtc_state->hw.active)
return 0;
- }
-
- /*
- * Get allowed DBuf slices for correspondent pipe and platform.
- */
- dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
-
- /*
- * Figure out at which DBuf slice we start, i.e if we start at Dbuf S2
- * and slice size is 1024, the offset would be 1024
- */
- offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
- slice_size, ddb_size);
-
- /*
- * Figure out total size of allowed DBuf slices, which is basically
- * a number of allowed slices for that pipe multiplied by slice size.
- * Inside of this
- * range ddb entries are still allocated in proportion to display width.
- */
- ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
/*
* Watermark/ddb requirement highly depends upon width of the
* framebuffer, So instead of allocating DDB equally among pipes
* distribute DDB based on resolution/width of the display.
*/
- total_slice_mask = dbuf_slice_mask;
- for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
- const struct drm_display_mode *pipe_mode =
- &crtc_state->hw.pipe_mode;
- enum pipe pipe = crtc->pipe;
- int hdisplay, vdisplay;
- u32 pipe_dbuf_slice_mask;
+ drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
- if (!crtc_state->hw.active)
- continue;
+ return hdisplay;
+}
- pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state,
- active_pipes);
+static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
+ enum pipe for_pipe,
+ unsigned int *weight_start,
+ unsigned int *weight_end,
+ unsigned int *weight_total)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(dbuf_state->base.state->base.dev);
+ enum pipe pipe;
- /*
- * According to BSpec pipe can share one dbuf slice with another
- * pipes or pipe can use multiple dbufs, in both cases we
- * account for other pipes only if they have exactly same mask.
- * However we need to account how many slices we should enable
- * in total.
- */
- total_slice_mask |= pipe_dbuf_slice_mask;
+ *weight_start = 0;
+ *weight_end = 0;
+ *weight_total = 0;
+
+ for_each_pipe(dev_priv, pipe) {
+ int weight = dbuf_state->weight[pipe];
/*
* Do not account pipes using other slice sets
@@ -4195,42 +4124,78 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
* i.e no partial intersection), so it is enough to check for
* equality for now.
*/
- if (dbuf_slice_mask != pipe_dbuf_slice_mask)
+ if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
continue;
- drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
+ *weight_total += weight;
+ if (pipe < for_pipe) {
+ *weight_start += weight;
+ *weight_end += weight;
+ } else if (pipe == for_pipe) {
+ *weight_end += weight;
+ }
+ }
+}
- total_width_in_range += hdisplay;
+static int
+skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ unsigned int weight_total, weight_start, weight_end;
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ struct intel_crtc_state *crtc_state;
+ struct skl_ddb_entry ddb_slices;
+ enum pipe pipe = crtc->pipe;
+ u32 ddb_range_size;
+ u32 dbuf_slice_mask;
+ u32 start, end;
+ int ret;
- if (pipe < for_pipe)
- width_before_pipe_in_range += hdisplay;
- else if (pipe == for_pipe)
- pipe_width = hdisplay;
+ if (new_dbuf_state->weight[pipe] == 0) {
+ new_dbuf_state->ddb[pipe].start = 0;
+ new_dbuf_state->ddb[pipe].end = 0;
+ goto out;
}
- /*
- * FIXME: For now we always enable slice S1 as per
- * the Bspec display initialization sequence.
- */
- new_dbuf_state->enabled_slices = total_slice_mask | BIT(DBUF_S1);
+ dbuf_slice_mask = new_dbuf_state->slices[pipe];
- if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
- ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
+ skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices);
+ ddb_range_size = skl_ddb_entry_size(&ddb_slices);
+
+ intel_crtc_dbuf_weights(new_dbuf_state, pipe,
+ &weight_start, &weight_end, &weight_total);
+
+ start = ddb_range_size * weight_start / weight_total;
+ end = ddb_range_size * weight_end / weight_total;
+
+ new_dbuf_state->ddb[pipe].start = ddb_slices.start + start;
+ new_dbuf_state->ddb[pipe].end = ddb_slices.start + end;
+
+out:
+ if (skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
+ &new_dbuf_state->ddb[pipe]))
+ return 0;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
- start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
- end = ddb_range_size *
- (width_before_pipe_in_range + pipe_width) / total_width_in_range;
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
- alloc->start = offset + start;
- alloc->end = offset + end;
+ crtc_state->wm.skl.ddb = new_dbuf_state->ddb[pipe];
drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
- for_crtc->base.id, for_crtc->name,
- dbuf_slice_mask, alloc->start, alloc->end, active_pipes);
+ "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
+ old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
+ new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
+ old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
return 0;
}
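
With the per-pipe weights in place, skl_crtc_allocate_ddb() carves ddb[pipe] out of the shared slice range in proportion to width: intel_crtc_ddb_weight() reduces a pipe to its hdisplay, and intel_crtc_dbuf_weights() sums those weights before, through, and across all pipes on the same slice set. A worked example of the split, with made-up widths and range size:

    #include <stdio.h>

    int main(void)
    {
            /* Two pipes sharing one slice set; all numbers are hypothetical. */
            unsigned int weight_a = 1920, weight_b = 1280;
            unsigned int weight_total = weight_a + weight_b;
            unsigned int ddb_range_size = 2048;  /* blocks covered by the shared slices */

            /* pipe A: weight_start = 0, weight_end = weight_a */
            unsigned int a_start = ddb_range_size * 0 / weight_total;
            unsigned int a_end   = ddb_range_size * weight_a / weight_total;

            /* pipe B: weight_start = weight_a, weight_end = weight_total */
            unsigned int b_start = ddb_range_size * weight_a / weight_total;
            unsigned int b_end   = ddb_range_size * weight_total / weight_total;

            /* prints "A: 0-1228  B: 1228-2048"; the driver then offsets both
             * by ddb_slices.start */
            printf("A: %u-%u  B: %u-%u\n", a_start, a_end, b_start, b_end);
            return 0;
    }
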
@@ -4300,12 +4265,12 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
- val = I915_READ(CUR_BUF_CFG(pipe));
+ val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
return;
}
- val = I915_READ(PLANE_CTL(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_CTL(pipe, plane_id));
/* No DDB allocated for disabled planes */
if (val & PLANE_CTL_ENABLE)
@@ -4314,11 +4279,11 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
val & PLANE_CTL_ALPHA_MASK);
if (INTEL_GEN(dev_priv) >= 11) {
- val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
} else {
- val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
- val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
+ val2 = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
if (fourcc &&
drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
@@ -4632,10 +4597,8 @@ static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
}
-static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
- u8 active_pipes)
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -4798,55 +4761,30 @@ skl_plane_wm_level(const struct intel_crtc_state *crtc_state,
}
static int
-skl_allocate_pipe_ddb(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+skl_allocate_plane_ddb(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
+ const struct intel_dbuf_state *dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
+ int num_active = hweight8(dbuf_state->active_pipes);
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
u16 uv_total[I915_MAX_PLANES] = {};
u64 total_data_rate;
enum plane_id plane_id;
- int num_active;
u32 blocks;
int level;
- int ret;
/* Clear the partitioning for disabled planes. */
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
- if (!crtc_state->hw.active) {
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
- struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- /*
- * FIXME hack to make sure we compute this sensibly when
- * turning off all the pipes. Otherwise we leave it at
- * whatever we had previously, and then runtime PM will
- * mess it up by turning off all but S1. Remove this
- * once the dbuf state computation flow becomes sane.
- */
- if (new_dbuf_state->active_pipes == 0) {
- new_dbuf_state->enabled_slices = BIT(DBUF_S1);
-
- if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
- ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
- }
-
- alloc->start = alloc->end = 0;
+ if (!crtc_state->hw.active)
return 0;
- }
if (INTEL_GEN(dev_priv) >= 11)
total_data_rate =
@@ -4855,12 +4793,6 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
total_data_rate =
skl_get_total_relative_data_rate(state, crtc);
- ret = skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state,
- total_data_rate,
- alloc, &num_active);
- if (ret)
- return ret;
-
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
@@ -5731,6 +5663,18 @@ static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
return a->start < b->end && b->start < a->end;
}
+static void skl_ddb_entry_union(struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
+{
+ if (a->end && b->end) {
+ a->start = min(a->start, b->start);
+ a->end = max(a->end, b->end);
+ } else if (b->end) {
+ a->start = b->start;
+ a->end = b->end;
+ }
+}
+
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
const struct skl_ddb_entry *entries,
int num_entries, int ignore_idx)
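
skl_ddb_entry_union() treats end == 0 as an empty entry, so unioning into a zeroed accumulator simply copies the first non-empty operand; this is what lets the hardware-readout path further down rebuild a pipe's overall DDB span from its per-plane entries. A self-contained sketch with made-up entries (the kernel struct uses u16 fields):

    #include <stdio.h>

    struct ddb_entry { unsigned int start, end; };

    /* Same logic as the kernel helper: end == 0 means "empty". */
    static void ddb_union(struct ddb_entry *a, const struct ddb_entry *b)
    {
            if (a->end && b->end) {
                    a->start = a->start < b->start ? a->start : b->start;
                    a->end   = a->end   > b->end   ? a->end   : b->end;
            } else if (b->end) {
                    *a = *b;
            }
    }

    int main(void)
    {
            struct ddb_entry pipe = { 0, 0 };          /* empty accumulator */
            struct ddb_entry plane_y  = { 128, 512 };  /* hypothetical plane entries */
            struct ddb_entry plane_uv = { 512, 640 };

            ddb_union(&pipe, &plane_y);   /* pipe = 128-512 */
            ddb_union(&pipe, &plane_uv);  /* pipe = 128-640 */
            printf("%u-%u\n", pipe.start, pipe.end);
            return 0;
    }
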
@@ -5775,39 +5719,114 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
return 0;
}
+static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(dbuf_state->base.state->base.dev);
+ u8 enabled_slices;
+ enum pipe pipe;
+
+ /*
+ * FIXME: For now we always enable slice S1 as per
+ * the Bspec display initialization sequence.
+ */
+ enabled_slices = BIT(DBUF_S1);
+
+ for_each_pipe(dev_priv, pipe)
+ enabled_slices |= dbuf_state->slices[pipe];
+
+ return enabled_slices;
+}
+
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_dbuf_state *old_dbuf_state;
- const struct intel_dbuf_state *new_dbuf_state;
+ struct intel_dbuf_state *new_dbuf_state = NULL;
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- ret = skl_allocate_pipe_ddb(state, crtc);
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ new_dbuf_state = intel_atomic_get_dbuf_state(state);
+ if (IS_ERR(new_dbuf_state))
+ return PTR_ERR(new_dbuf_state);
+
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ break;
+ }
+
+ if (!new_dbuf_state)
+ return 0;
+
+ new_dbuf_state->active_pipes =
+ intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
if (ret)
return ret;
+ }
- ret = skl_ddb_add_affected_planes(old_crtc_state,
- new_crtc_state);
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->slices[pipe] =
+ skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
if (ret)
return ret;
}
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
+
+ if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
+ ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
- if (new_dbuf_state &&
- new_dbuf_state->enabled_slices != old_dbuf_state->enabled_slices)
drm_dbg_kms(&dev_priv->drm,
"Enabled dbuf slices 0x%x -> 0x%x (out of %d dbuf slices)\n",
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
+
+ if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ ret = skl_crtc_allocate_ddb(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_allocate_plane_ddb(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = skl_ddb_add_affected_planes(old_crtc_state,
+ new_crtc_state);
+ if (ret)
+ return ret;
+ }
return 0;
}
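
Taken together, the rewritten skl_compute_ddb() walks the dbuf global state in a fixed order; the outline below is only a reading aid for the hunk above, using the same function names:

    /*
     * 1. pull the dbuf global state into the atomic state once any CRTC is in it
     * 2. recompute active_pipes; lock the global state if it changed
     * 3. for every CRTC: slices[pipe] = skl_compute_dbuf_slices(); lock on change
     * 4. enabled_slices = union of slices[] (plus S1); serialize on change
     * 5. for CRTCs in the state: weight[pipe] = intel_crtc_ddb_weight(); lock on change
     * 6. for every CRTC: skl_crtc_allocate_ddb() carves ddb[pipe] out of its slice range
     * 7. for CRTCs in the state: skl_allocate_plane_ddb(), then
     *    skl_ddb_add_affected_planes()
     */
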
@@ -5944,83 +5963,6 @@ skl_print_wm_changes(struct intel_atomic_state *state)
}
}
-static int intel_add_affected_pipes(struct intel_atomic_state *state,
- u8 pipe_mask)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state;
-
- if ((pipe_mask & BIT(crtc->pipe)) == 0)
- continue;
-
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
-
- return 0;
-}
-
-static int
-skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
- int i, ret;
-
- if (dev_priv->wm.distrust_bios_wm) {
- /*
- * skl_ddb_get_pipe_allocation_limits() currently requires
- * all active pipes to be included in the state so that
- * it can redistribute the dbuf among them, and it really
- * wants to recompute things when distrust_bios_wm is set
- * so we add all the pipes to the state.
- */
- ret = intel_add_affected_pipes(state, ~0);
- if (ret)
- return ret;
- }
-
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- struct intel_dbuf_state *new_dbuf_state;
- const struct intel_dbuf_state *old_dbuf_state;
-
- new_dbuf_state = intel_atomic_get_dbuf_state(state);
- if (IS_ERR(new_dbuf_state))
- return PTR_ERR(new_dbuf_state);
-
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
-
- new_dbuf_state->active_pipes =
- intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
-
- if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes)
- break;
-
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
-
- /*
- * skl_ddb_get_pipe_allocation_limits() currently requires
- * all active pipes to be included in the state so that
- * it can redistribute the dbuf among them.
- */
- ret = intel_add_affected_pipes(state,
- new_dbuf_state->active_pipes);
- if (ret)
- return ret;
-
- break;
- }
-
- return 0;
-}
-
/*
* To make sure the cursor watermark registers are always consistent
* with our computed state the following scenario needs special
@@ -6088,15 +6030,6 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc_state *new_crtc_state;
int ret, i;
- ret = skl_ddb_add_affected_pipes(state);
- if (ret)
- return ret;
-
- /*
- * Calculate WM's for all pipes that are part of this transaction.
- * Note that skl_ddb_add_affected_pipes may have added more CRTC's that
- * weren't otherwise being modified if pipe allocations had to change.
- */
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = skl_build_pipe_wm(state, crtc);
if (ret)
@@ -6231,9 +6164,9 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
for (level = 0; level <= max_level; level++) {
if (plane_id != PLANE_CURSOR)
- val = I915_READ(PLANE_WM(pipe, plane_id, level));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_WM(pipe, plane_id, level));
else
- val = I915_READ(CUR_WM(pipe, level));
+ val = intel_uncore_read(&dev_priv->uncore, CUR_WM(pipe, level));
skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
@@ -6242,9 +6175,9 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
wm->sagv_wm0 = wm->wm[0];
if (plane_id != PLANE_CURSOR)
- val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id));
else
- val = I915_READ(CUR_WM_TRANS(pipe));
+ val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe));
skl_wm_level_from_reg_val(val, &wm->trans_wm);
}
@@ -6255,20 +6188,49 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(dev_priv->dbuf.obj.state);
struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
for_each_intel_crtc(&dev_priv->drm, crtc) {
- crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
- }
- if (dev_priv->active_pipes) {
- /* Fully recompute DDB on first atomic commit */
- dev_priv->wm.distrust_bios_wm = true;
+ memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ struct skl_ddb_entry *ddb_uv =
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
+
+ skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
+ plane_id, ddb_y, ddb_uv);
+
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
+ }
+
+ dbuf_state->slices[pipe] =
+ skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes);
+
+ dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
+
+ crtc_state->wm.skl.ddb = dbuf_state->ddb[pipe];
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
+ dbuf_state->ddb[pipe].end, dbuf_state->active_pipes);
}
+
+ dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
}
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
@@ -6280,7 +6242,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
- hw->wm_pipe[pipe] = I915_READ(WM0_PIPE_ILK(pipe));
+ hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
memset(active, 0, sizeof(*active));
@@ -6324,13 +6286,13 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
{
u32 tmp;
- tmp = I915_READ(DSPFW1);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
- tmp = I915_READ(DSPFW2);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
wm->sr.fbc = _FW_WM(tmp, FBC_SR);
wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
@@ -6338,7 +6300,7 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
- tmp = I915_READ(DSPFW3);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
@@ -6352,7 +6314,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
u32 tmp;
for_each_pipe(dev_priv, pipe) {
- tmp = I915_READ(VLV_DDL(pipe));
+ tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
wm->ddl[pipe].plane[PLANE_PRIMARY] =
(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
@@ -6364,34 +6326,34 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
}
- tmp = I915_READ(DSPFW1);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
- tmp = I915_READ(DSPFW2);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
- tmp = I915_READ(DSPFW3);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
if (IS_CHERRYVIEW(dev_priv)) {
- tmp = I915_READ(DSPFW7_CHV);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = I915_READ(DSPFW8_CHV);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
- tmp = I915_READ(DSPFW9_CHV);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
- tmp = I915_READ(DSPHOWM);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
@@ -6403,11 +6365,11 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
} else {
- tmp = I915_READ(DSPFW7);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = I915_READ(DSPHOWM);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
@@ -6428,7 +6390,7 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
g4x_read_wm_values(dev_priv, wm);
- wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state =
@@ -6572,7 +6534,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
vlv_read_wm_values(dev_priv, wm);
- wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
wm->level = VLV_WM_LEVEL_PM2;
if (IS_CHERRYVIEW(dev_priv)) {
@@ -6719,9 +6681,9 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
*/
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
- I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
- I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
- I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM1_LP_SR_EN);
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM1_LP_SR_EN);
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM1_LP_SR_EN);
/*
* Don't touch WM1S_LP_EN here.
@@ -6739,25 +6701,25 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
for_each_intel_crtc(&dev_priv->drm, crtc)
ilk_pipe_wm_get_hw_state(crtc);
- hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
- hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
- hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
+ hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
+ hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
+ hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
- hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+ hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
if (INTEL_GEN(dev_priv) >= 7) {
- hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
- hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+ hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
}
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
else if (IS_IVYBRIDGE(dev_priv))
- hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
hw->enable_fbc_wm =
- !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+ !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
/**
@@ -6808,14 +6770,14 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
if (!HAS_IPC(dev_priv))
return;
- val = I915_READ(DISP_ARB_CTL2);
+ val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
if (dev_priv->ipc_enabled)
val |= DISP_IPC_ENABLE;
else
val &= ~DISP_IPC_ENABLE;
- I915_WRITE(DISP_ARB_CTL2, val);
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
}
static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
@@ -6850,7 +6812,7 @@ static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
@@ -6858,12 +6820,12 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(DSPCNTR(pipe),
- I915_READ(DSPCNTR(pipe)) |
+ intel_uncore_write(&dev_priv->uncore, DSPCNTR(pipe),
+ intel_uncore_read(&dev_priv->uncore, DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
- I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
- POSTING_READ(DSPSURF(pipe));
+ intel_uncore_write(&dev_priv->uncore, DSPSURF(pipe), intel_uncore_read(&dev_priv->uncore, DSPSURF(pipe)));
+ intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe));
}
}
@@ -6879,10 +6841,10 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
- I915_WRITE(PCH_3DCGDIS0,
+ intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
SVSMUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(PCH_3DCGDIS1,
+ intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS1,
VFMUNIT_CLOCK_GATE_DISABLE);
/*
@@ -6892,12 +6854,12 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
* The bit 5 of 0x42020
* The bit 15 of 0x45000
*/
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ (intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
- I915_WRITE(DISP_ARB_CTL,
- (I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL,
+ (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
/*
@@ -6909,18 +6871,18 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
*/
if (IS_IRONLAKE_M(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ilk */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE);
}
- I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
g4x_disable_trickle_feed(dev_priv);
@@ -6938,27 +6900,27 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
PCH_DPLUNIT_CLOCK_GATE_DISABLE |
PCH_CPUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN2, intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
/* The below fixes the weird display corruption, a few pixels shifted
* downward, on (only) LVDS of some HP laptops with IVY.
*/
for_each_pipe(dev_priv, pipe) {
- val = I915_READ(TRANS_CHICKEN2(pipe));
+ val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
if (dev_priv->vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
- I915_WRITE(TRANS_CHICKEN2(pipe), val);
+ intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN2(pipe), val);
}
/* WADP0ClockGatingDisable */
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(TRANS_CHICKEN1(pipe),
+ intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
}
@@ -6967,7 +6929,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
u32 tmp;
- tmp = I915_READ(MCH_SSKPD);
+ tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD);
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
drm_dbg_kms(&dev_priv->drm,
"Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
@@ -6978,14 +6940,14 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
- I915_WRITE(GEN6_UCGCTL1,
- I915_READ(GEN6_UCGCTL1) |
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
+ intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
GEN6_CSUNIT_CLOCK_GATE_DISABLE);
@@ -7002,7 +6964,7 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
* WaDisableRCCUnitClockGating:snb
* WaDisableRCPBUnitClockGating:snb
*/
- I915_WRITE(GEN6_UCGCTL2,
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
@@ -7017,14 +6979,14 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
*
* WaFbcAsynchFlipDisableFbcQueue:snb
*/
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL);
- I915_WRITE(ILK_DSPCLK_GATE_D,
- I915_READ(ILK_DSPCLK_GATE_D) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D,
+ intel_uncore_read(&dev_priv->uncore, ILK_DSPCLK_GATE_D) |
ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
@@ -7042,23 +7004,23 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
* disabled when not needed anymore in order to save power.
*/
if (HAS_PCH_LPT_LP(dev_priv))
- I915_WRITE(SOUTH_DSPCLK_GATE_D,
- I915_READ(SOUTH_DSPCLK_GATE_D) |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D,
+ intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
PCH_LP_PARTITION_LEVEL_DISABLE);
/* WADPOClockGatingDisable:hsw */
- I915_WRITE(TRANS_CHICKEN1(PIPE_A),
- I915_READ(TRANS_CHICKEN1(PIPE_A)) |
+ intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A),
+ intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A)) |
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_LPT_LP(dev_priv)) {
- u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val);
}
}
@@ -7070,60 +7032,62 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
u32 val;
/* WaTempDisableDOPClkGating:bdw */
- misccpctl = I915_READ(GEN7_MISCCPCTL);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- val = I915_READ(GEN8_L3SQCREG1);
+ val = intel_uncore_read(&dev_priv->uncore, GEN8_L3SQCREG1);
val &= ~L3_PRIO_CREDITS_MASK;
val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
- I915_WRITE(GEN8_L3SQCREG1, val);
+ intel_uncore_write(&dev_priv->uncore, GEN8_L3SQCREG1, val);
/*
* Wait at least 100 clocks before re-enabling clock gating.
* See the definition of L3SQCREG1 in BSpec.
*/
- POSTING_READ(GEN8_L3SQCREG1);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_L3SQCREG1);
udelay(1);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
}
static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* Wa_1409120013:icl,ehl */
- I915_WRITE(ILK_DPFC_CHICKEN,
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/* This is not a Wa. Enable to reduce Sampler power */
- I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
- I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
+ intel_uncore_read(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
/*Wa_14010594013:icl, ehl */
intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
0, CNL_DELAY_PMRSP);
}
-static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
+static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
{
- /* Wa_1409120013:tgl */
- I915_WRITE(ILK_DPFC_CHICKEN,
- ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+ /* Wa_1409120013:tgl,rkl,adl_s,dg1 */
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
+ ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/* Wa_1409825376:tgl (pre-prod)*/
if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1))
- I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
TGL_VRH_GATING_DIS);
- /* Wa_14011059788:tgl */
+ /* Wa_14011059788:tgl,rkl,adl_s,dg1 */
intel_uncore_rmw(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
0, DFR_DISABLE);
}
static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ gen12lp_init_clock_gating(dev_priv);
+
/* Wa_1409836686:dg1[a0] */
if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0))
- I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
DPT_GATING_DIS);
}
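
gen8_set_l3sqc_credits() keeps its existing ordering: the GEN8_L3SQCREG1 update is flushed with a posting read before the udelay() that separates it from re-enabling DOP clock gating. A generic sketch of that write/flush/delay idiom, assuming kernel context and with FOO_REG as a made-up register:

    /* Fragment only: FOO_REG and new_val are placeholders. */
    intel_uncore_write(&dev_priv->uncore, FOO_REG, new_val);
    /* Read the register back so the write reaches the device before the
     * settling delay starts counting. */
    intel_uncore_posting_read(&dev_priv->uncore, FOO_REG);
    udelay(1);
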
@@ -7133,7 +7097,7 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
return;
/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
CNP_PWM_CGE_GATING_DISABLE);
}
@@ -7143,35 +7107,35 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
cnp_init_clock_gating(dev_priv);
/* This is not a Wa. Enable for better image quality */
- I915_WRITE(_3D_CHICKEN3,
+ intel_uncore_write(&dev_priv->uncore, _3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
/* WaEnableChickenDCPR:cnl */
- I915_WRITE(GEN8_CHICKEN_DCPR_1,
- I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+ intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
+ intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
/*
* WaFbcWakeMemOn:cnl
* Display WA #0859: cnl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_MEMORY_WAKE);
- val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
+ val = intel_uncore_read(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE);
/* ReadHitWriteOnlyDisable:cnl */
val |= RCCUNIT_CLKGATE_DIS;
- I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
+ intel_uncore_write(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE, val);
/* Wa_2201832410:cnl */
- val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
+ val = intel_uncore_read(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE);
val |= GWUNIT_CLKGATE_DIS;
- I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);
+ intel_uncore_write(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE, val);
/* WaDisableVFclkgate:cnl */
/* WaVFUnitClockGatingDisable:cnl */
- val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
+ val = intel_uncore_read(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE);
val |= VFUNIT_CLKGATE_DIS;
- I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
+ intel_uncore_write(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE, val);
}
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7180,21 +7144,21 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WAC6entrylatency:cfl */
- I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+ intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
FBC_LLC_FULLY_OPEN);
/*
* WaFbcTurnOffFbcWatermark:cfl
* Display WA #0562: cfl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcNukeOnHostModify:cfl
* Display WA #0873: cfl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
@@ -7203,31 +7167,31 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WAC6entrylatency:kbl */
- I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+ intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
FBC_LLC_FULLY_OPEN);
/* WaDisableSDEUnitClockGating:kbl */
if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaDisableGamClockGating:kbl */
if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
/*
* WaFbcTurnOffFbcWatermark:kbl
* Display WA #0562: kbl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcNukeOnHostModify:kbl
* Display WA #0873: kbl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
@@ -7236,32 +7200,32 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WaDisableDopClockGating:skl */
- I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL) &
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL) &
~GEN7_DOP_CLOCK_GATE_ENABLE);
/* WAC6entrylatency:skl */
- I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+ intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
FBC_LLC_FULLY_OPEN);
/*
* WaFbcTurnOffFbcWatermark:skl
* Display WA #0562: skl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcNukeOnHostModify:skl
* Display WA #0873: skl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
/*
* WaFbcHighMemBwCorruptionAvoidance:skl
* Display WA #0883: skl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_DISABLE_DUMMY0);
}
@@ -7270,42 +7234,42 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
enum pipe pipe;
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
- I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
HSW_FBCQ_DIS);
/* WaSwitchSolVfFArbitrationPriority:bdw */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+ intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
/* WaPsrDPAMaskVBlankInSRD:bdw */
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(CHICKEN_PIPESL_1(pipe),
- I915_READ(CHICKEN_PIPESL_1(pipe)) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) |
BDW_DPRS_MASK_VBLANK_SRD);
}
/* WaVSRefCountFullforceMissDisable:bdw */
/* WaDSRefCountFullforceMissDisable:bdw */
- I915_WRITE(GEN7_FF_THREAD_MODE,
- I915_READ(GEN7_FF_THREAD_MODE) &
+ intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
+ intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
- I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
/* WaDisableSDEUnitClockGating:bdw */
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaProgramL3SqcReg1Default:bdw */
gen8_set_l3sqc_credits(dev_priv, 30, 2);
/* WaKVMNotificationOnConfigChange:bdw */
- I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR2_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR2_1)
| KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
lpt_init_clock_gating(dev_priv);
@@ -7315,24 +7279,24 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
* Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
* clock gating.
*/
- I915_WRITE(GEN6_UCGCTL1,
- I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
+ intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
- I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
HSW_FBCQ_DIS);
/* This is required by WaCatErrorRejectionIssue:hsw */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
/* WaSwitchSolVfFArbitrationPriority:hsw */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+ intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
lpt_init_clock_gating(dev_priv);
}
@@ -7341,26 +7305,26 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
u32 snpcr;
- I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
/* WaDisableBackToBackFlipFix:ivb */
- I915_WRITE(IVB_CHICKEN3,
+ intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
if (IS_IVB_GT1(dev_priv))
- I915_WRITE(GEN7_ROW_CHICKEN2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
else {
/* must write both registers */
- I915_WRITE(GEN7_ROW_CHICKEN2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
- I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2_GT2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
}
@@ -7368,20 +7332,20 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:ivb workaround.
*/
- I915_WRITE(GEN6_UCGCTL2,
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
/* This is required by WaCatErrorRejectionIssue:ivb */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
g4x_disable_trickle_feed(dev_priv);
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr = intel_uncore_read(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR);
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= GEN6_MBC_SNPCR_MED;
- I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+ intel_uncore_write(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, snpcr);
if (!HAS_PCH_NOP(dev_priv))
cpt_init_clock_gating(dev_priv);
@@ -7392,58 +7356,58 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaDisableBackToBackFlipFix:vlv */
- I915_WRITE(IVB_CHICKEN3,
+ intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
/* WaDisableDopClockGating:vlv */
- I915_WRITE(GEN7_ROW_CHICKEN2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
/* This is required by WaCatErrorRejectionIssue:vlv */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
/*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:vlv workaround.
*/
- I915_WRITE(GEN6_UCGCTL2,
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
/* WaDisableL3Bank2xClockGate:vlv
* Disabling L3 clock gating- MMIO 940c[25] = 1
* Set bit 25, to disable L3_BANK_2x_CLK_GATING */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN7_UCGCTL4,
+ intel_uncore_read(&dev_priv->uncore, GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
/*
* WaDisableVLVClockGating_VBIIssue:vlv
 * Disable clock gating on the GCFG unit to prevent a delay
* in the reporting of vblank events.
*/
- I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
+ intel_uncore_write(&dev_priv->uncore, VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
- I915_WRITE(GEN7_FF_THREAD_MODE,
- I915_READ(GEN7_FF_THREAD_MODE) &
+ intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
+ intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
/* WaDisableSemaphoreAndSyncFlipWait:chv */
- I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
/* WaDisableCSUnitClockGating:chv */
- I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
GEN6_CSUNIT_CLOCK_GATE_DISABLE);
/* WaDisableSDEUnitClockGating:chv */
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/*
@@ -7458,17 +7422,17 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
u32 dspclk_gate;
- I915_WRITE(RENCLK_GATE_D1, 0);
- I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, 0);
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
GS_UNIT_CLOCK_GATE_DISABLE |
CL_UNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(RAMCLK_GATE_D, 0);
+ intel_uncore_write(&dev_priv->uncore, RAMCLK_GATE_D, 0);
dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
OVRUNIT_CLOCK_GATE_DISABLE |
OVCUNIT_CLOCK_GATE_DISABLE;
if (IS_GM45(dev_priv))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+ intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D, dspclk_gate);
g4x_disable_trickle_feed(dev_priv);
}
@@ -7489,49 +7453,49 @@ static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
I965_RCPB_CLOCK_GATE_DISABLE |
I965_ISC_CLOCK_GATE_DISABLE |
I965_FBC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
- I915_WRITE(MI_ARB_STATE,
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, 0);
+ intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
- u32 dstate = I915_READ(D_STATE);
+ u32 dstate = intel_uncore_read(&dev_priv->uncore, D_STATE);
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
DSTATE_DOT_CLOCK_GATING;
- I915_WRITE(D_STATE, dstate);
+ intel_uncore_write(&dev_priv->uncore, D_STATE, dstate);
if (IS_PINEVIEW(dev_priv))
- I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+ intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
/* IIR "flip pending" means done if this bit is set */
- I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+ intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
/* interrupts should cause a wake up from C3 */
- I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
+ intel_uncore_write(&dev_priv->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
- I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+ intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
- I915_WRITE(MI_ARB_STATE,
+ intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
/* interrupts should cause a wake up from C3 */
- I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
+ intel_uncore_write(&dev_priv->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
_MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
- I915_WRITE(MEM_MODE,
+ intel_uncore_write(&dev_priv->uncore, MEM_MODE,
_MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
/*
@@ -7541,13 +7505,13 @@ static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
 * absolutely nothing) would not allow FBC to recompress
* until a 2D blit occurs.
*/
- I915_WRITE(SCPD0,
+ intel_uncore_write(&dev_priv->uncore, SCPD0,
_MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
}
static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(MEM_MODE,
+ intel_uncore_write(&dev_priv->uncore, MEM_MODE,
_MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
_MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
@@ -7583,7 +7547,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
if (IS_DG1(dev_priv))
dev_priv->display.init_clock_gating = dg1_init_clock_gating;
else if (IS_GEN(dev_priv, 12))
- dev_priv->display.init_clock_gating = tgl_init_clock_gating;
+ dev_priv->display.init_clock_gating = gen12lp_init_clock_gating;
else if (IS_GEN(dev_priv, 11))
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index eab83e251dd5..97550cf0b6df 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -9,8 +9,10 @@
#include <linux/types.h>
#include "display/intel_bw.h"
+#include "display/intel_display.h"
#include "display/intel_global_state.h"
+#include "i915_drv.h"
#include "i915_reg.h"
struct drm_device;
@@ -40,7 +42,6 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
-u16 intel_get_ddb_size(struct drm_i915_private *dev_priv);
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry);
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
@@ -69,6 +70,10 @@ bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
struct intel_dbuf_state {
struct intel_global_state base;
+ struct skl_ddb_entry ddb[I915_MAX_PIPES];
+ unsigned int weight[I915_MAX_PIPES];
+ u8 slices[I915_MAX_PIPES];
+
u8 enabled_slices;
u8 active_pipes;
};
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 02ebf5a04a9b..0ec0cf191955 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -404,8 +404,8 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
lockdep_assert_held(&i915->sb_lock);
/*
- * GEN6_PCODE_* are outside of the forcewake domain, we can
- * use te fw I915_READ variants to reduce the amount of work
+ * GEN6_PCODE_* are outside of the forcewake domain, we can use
+ * intel_uncore_read/write_fw variants to reduce the amount of work
* required when reading/writing.
*/
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 1c14a07eba7d..9ac501bcfdad 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -2070,7 +2070,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
* This routine waits until the target register @reg contains the expected
* @value after applying the @mask, i.e. it waits until ::
*
- * (I915_READ_FW(reg) & mask) == value
+ * (intel_uncore_read_fw(uncore, reg) & mask) == value
*
* Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
* For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
@@ -2126,7 +2126,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
* This routine waits until the target register @reg contains the expected
* @value after applying the @mask, i.e. it waits until ::
*
- * (I915_READ(reg) & mask) == value
+ * (intel_uncore_read(uncore, reg) & mask) == value
*
* Otherwise, the wait will timeout after @timeout_ms milliseconds.
*
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index bd2467284295..59f0da8f1fbb 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -216,7 +216,7 @@ void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
/*
* Like above but the caller must manage the uncore.lock itself.
- * Must be used with I915_READ_FW and friends.
+ * Must be used with intel_uncore_read_fw() and friends.
*/
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
enum forcewake_domains domains);
@@ -318,8 +318,8 @@ __uncore_write(write_notrace, 32, l, false)
* will be implemented using 2 32-bit writes in an arbitrary order with
* an arbitrary delay between them. This can cause the hardware to
* act upon the intermediate value, possibly leading to corruption and
- * machine death. For this reason we do not support I915_WRITE64, or
- * uncore->funcs.mmio_writeq.
+ * machine death. For this reason we do not support intel_uncore_write64,
+ * or uncore->funcs.mmio_writeq.
*
* When reading a 64-bit value as two 32-bit values, the delay may cause
* the two reads to mismatch, e.g. a timestamp overflowing. Also note that
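The reworded comment above keeps the long-standing warning: a 64-bit MMIO access may be split into two 32-bit accesses, so a pair of 32-bit reads can straddle a counter rollover. A minimal sketch of the usual mitigation (read high, read low, re-read high, retry on mismatch) is below; the read32() accessor and the register offsets are placeholders for illustration, not part of the i915 API.

#include <stdint.h>

/* Placeholder MMIO accessor standing in for a 32-bit register read. */
extern uint32_t read32(uint32_t offset);

#define TIMESTAMP_LO 0x0358  /* hypothetical low-word offset  */
#define TIMESTAMP_HI 0x035c  /* hypothetical high-word offset */

/*
 * Read a 64-bit free-running counter through two 32-bit reads.
 * If the high word changes between the reads, the low word belongs
 * to a different epoch, so retry until both halves agree.
 */
static uint64_t read_counter64(void)
{
	uint32_t hi, lo, tmp;

	do {
		hi  = read32(TIMESTAMP_HI);
		lo  = read32(TIMESTAMP_LO);
		tmp = read32(TIMESTAMP_HI);
	} while (hi != tmp);

	return ((uint64_t)hi << 32) | lo;
}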
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index f88473d396f4..f99bb0113726 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -38,8 +38,8 @@ static void quirk_add(struct drm_i915_gem_object *obj,
struct list_head *objects)
{
/* quirk is only for live tiled objects, use it to declare ownership */
- GEM_BUG_ON(obj->mm.quirked);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
list_add(&obj->st_link, objects);
}
@@ -85,7 +85,7 @@ static void unpin_ggtt(struct i915_ggtt *ggtt)
struct i915_vma *vma;
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
- if (vma->obj->mm.quirked)
+ if (i915_gem_object_has_tiling_quirk(vma->obj))
i915_vma_unpin(vma);
}
@@ -94,8 +94,8 @@ static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
struct drm_i915_gem_object *obj, *on;
list_for_each_entry_safe(obj, on, list, st_link) {
- GEM_BUG_ON(!obj->mm.quirked);
- obj->mm.quirked = false;
+ GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
i915_gem_object_put(obj);
}
@@ -442,28 +442,22 @@ static int igt_evict_contexts(void *arg)
/* Overfill the GGTT with context objects and so try to evict one. */
for_each_engine(engine, gt, id) {
struct i915_sw_fence fence;
- struct file *file;
-
- file = mock_file(i915);
- if (IS_ERR(file)) {
- err = PTR_ERR(file);
- break;
- }
count = 0;
onstack_fence_init(&fence);
do {
+ struct intel_context *ce;
struct i915_request *rq;
- struct i915_gem_context *ctx;
- ctx = live_context(i915, file);
- if (IS_ERR(ctx))
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
break;
/* We will need some GGTT space for the rq's context */
igt_evict_ctl.fail_if_busy = true;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
igt_evict_ctl.fail_if_busy = false;
+ intel_context_put(ce);
if (IS_ERR(rq)) {
/* When full, fail_if_busy will trigger EBUSY */
@@ -490,8 +484,6 @@ static int igt_evict_contexts(void *arg)
onstack_fence_fini(&fence);
pr_info("Submitted %lu contexts/requests on %s\n",
count, engine->name);
-
- fput(file);
if (err)
break;
}
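The eviction selftest above drops the mock-file/GEM-context plumbing in favour of engine-bound intel_context objects. A minimal sketch of that request lifecycle, as the hunk uses it, follows; error handling is trimmed and the usual i915 selftest headers are assumed to be in scope.

/*
 * Sketch of the context/request lifecycle the selftest switches to:
 * create a context for an engine, build a request on it, then drop
 * the local context reference once the request has been created.
 */
static int submit_nop(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	rq = intel_context_create_request(ce);
	intel_context_put(ce);	/* the request keeps the context alive */
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_add(rq);
	return 0;
}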
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index c53a222e3dec..70e07e9b78c2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -28,6 +28,7 @@
#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "i915_random.h"
#include "i915_selftest.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index debbac660519..e9d86dab8677 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -262,7 +262,7 @@ static int live_noa_delay(void *arg)
delay = intel_read_status_page(stream->engine, 0x102);
delay -= intel_read_status_page(stream->engine, 0x100);
- delay = i915_cs_timestamp_ticks_to_ns(i915, delay);
+ delay = intel_gt_clock_interval_to_ns(stream->engine->gt, delay);
pr_info("GPU delay: %uns, expected %lluns\n",
delay, expected);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index e424a6d1a68c..d2a678a2497e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -33,6 +33,7 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_requests.h"
#include "gt/selftest_engine_heartbeat.h"
@@ -1560,7 +1561,7 @@ static u32 trifilter(u32 *a)
static u64 cycles_to_ns(struct intel_engine_cs *engine, u32 cycles)
{
- u64 ns = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
+ u64 ns = intel_gt_clock_interval_to_ns(engine->gt, cycles);
return DIV_ROUND_CLOSEST(ns, 1 << TF_BIAS);
}
@@ -1932,9 +1933,7 @@ static int measure_inter_request(struct intel_context *ce)
intel_ring_advance(rq, cs);
i915_request_add(rq);
}
- local_bh_disable();
i915_sw_fence_commit(submit);
- local_bh_enable();
intel_engine_flush_submission(ce->engine);
heap_fence_put(submit);
@@ -2220,11 +2219,9 @@ static int measure_completion(struct intel_context *ce)
intel_ring_advance(rq, cs);
dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
-
- local_bh_disable();
i915_request_add(rq);
- local_bh_enable();
+ intel_engine_flush_submission(ce->engine);
if (wait_for(READ_ONCE(sema[i]) == -1, 50)) {
err = -EIO;
goto err;
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index ec0ecb4e4ca6..83f6e5f31fb3 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -3,6 +3,7 @@
*
* Copyright © 2018 Intel Corporation
*/
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gem/selftests/igt_gem_utils.h"
@@ -219,6 +220,9 @@ void igt_spinner_fini(struct igt_spinner *spin)
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
+ if (i915_request_is_ready(rq))
+ intel_engine_flush_submission(rq->engine);
+
return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
rq->fence.seqno),
100) &&
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 0aeba8e3af28..ce7adfa3bca0 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -129,6 +129,21 @@ static void igt_object_release(struct drm_i915_gem_object *obj)
i915_gem_object_put(obj);
}
+static bool is_contiguous(struct drm_i915_gem_object *obj)
+{
+ struct scatterlist *sg;
+ dma_addr_t addr = -1;
+
+ for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+ if (addr != -1 && sg_dma_address(sg) != addr)
+ return false;
+
+ addr = sg_dma_address(sg) + sg_dma_len(sg);
+ }
+
+ return true;
+}
+
static int igt_mock_contiguous(void *arg)
{
struct intel_memory_region *mem = arg;
@@ -150,8 +165,8 @@ static int igt_mock_contiguous(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- if (obj->mm.pages->nents != 1) {
- pr_err("%s min object spans multiple sg entries\n", __func__);
+ if (!is_contiguous(obj)) {
+ pr_err("%s min object spans disjoint sg entries\n", __func__);
err = -EINVAL;
goto err_close_objects;
}
@@ -163,8 +178,8 @@ static int igt_mock_contiguous(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- if (obj->mm.pages->nents != 1) {
- pr_err("%s max object spans multiple sg entries\n", __func__);
+ if (!is_contiguous(obj)) {
+ pr_err("%s max object spans disjoint sg entries\n", __func__);
err = -EINVAL;
goto err_close_objects;
}
@@ -189,8 +204,8 @@ static int igt_mock_contiguous(void *arg)
goto err_close_objects;
}
- if (obj->mm.pages->nents != 1) {
- pr_err("%s object spans multiple sg entries\n", __func__);
+ if (!is_contiguous(obj)) {
+ pr_err("%s object spans disjoint sg entries\n", __func__);
err = -EINVAL;
goto err_close_objects;
}
@@ -337,6 +352,68 @@ out_put:
return err;
}
+#ifndef SZ_8G
+#define SZ_8G BIT_ULL(33)
+#endif
+
+static int igt_mock_max_segment(void *arg)
+{
+ const unsigned int max_segment = i915_sg_segment_size();
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_buddy_block *block;
+ struct scatterlist *sg;
+ LIST_HEAD(objects);
+ u64 size;
+ int err = 0;
+
+ /*
+ * While we may create very large contiguous blocks, we may need
+ * to break those down for consumption elsewhere. In particular,
+ * dma-mapping with scatterlist elements have an implicit limit of
+ * UINT_MAX on each element.
+ */
+
+ size = SZ_8G;
+ mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ obj = igt_object_create(mem, &objects, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_put;
+ }
+
+ size = 0;
+ list_for_each_entry(block, &obj->mm.blocks, link) {
+ if (i915_buddy_block_size(&mem->mm, block) > size)
+ size = i915_buddy_block_size(&mem->mm, block);
+ }
+ if (size < max_segment) {
+ pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
+ __func__, max_segment, size);
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+ if (sg->length > max_segment) {
+ pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
+ __func__, sg->length, max_segment);
+ err = -EINVAL;
+ goto out_close;
+ }
+ }
+
+out_close:
+ close_objects(mem, &objects);
+out_put:
+ intel_memory_region_put(mem);
+ return err;
+}
+
static int igt_gpu_write_dw(struct intel_context *ce,
struct i915_vma *vma,
u32 dword,
@@ -775,14 +852,22 @@ static int _perf_memcpy(struct intel_memory_region *src_mr,
}
sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+ if (t[0] <= 0) {
+ /* ignore the impossible to protect our sanity */
+ pr_debug("Skipping %s src(%s, %s) -> dst(%s, %s) %14s %4lluKiB copy, unstable measurement [%lld, %lld]\n",
+ __func__,
+ src_mr->name, repr_type(src_type),
+ dst_mr->name, repr_type(dst_type),
+ tests[i].name, size >> 10,
+ t[0], t[4]);
+ continue;
+ }
+
pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
__func__,
- src_mr->name,
- repr_type(src_type),
- dst_mr->name,
- repr_type(dst_type),
- tests[i].name,
- size >> 10,
+ src_mr->name, repr_type(src_type),
+ dst_mr->name, repr_type(dst_type),
+ tests[i].name, size >> 10,
div64_u64(mul_u32_u32(4 * size,
1000 * 1000 * 1000),
t[1] + 2 * t[2] + t[3]) >> 20);
@@ -848,6 +933,7 @@ int intel_memory_region_mock_selftests(void)
SUBTEST(igt_mock_fill),
SUBTEST(igt_mock_contiguous),
SUBTEST(igt_mock_splintered_region),
+ SUBTEST(igt_mock_max_segment),
};
struct intel_memory_region *mem;
struct drm_i915_private *i915;
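The new igt_mock_max_segment test above checks that no scatterlist element exceeds i915_sg_segment_size(), since dma-mapping implicitly limits each element to UINT_MAX bytes. As a rough illustration of the arithmetic, the hypothetical helper below computes how many entries a large contiguous block needs under a given segment cap.

#include <stdint.h>

/*
 * Hypothetical helper: number of scatterlist entries needed to cover a
 * contiguous block of 'size' bytes when each entry may span at most
 * 'max_segment' bytes (e.g. a page-rounded cap below UINT_MAX).
 */
static uint64_t sg_entries_needed(uint64_t size, uint32_t max_segment)
{
	return (size + max_segment - 1) / max_segment;
}

/*
 * An 8 GiB block with a 0xFFFFF000-byte cap (UINT_MAX rounded down to a
 * 4 KiB page) needs three entries rather than one oversized entry.
 */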
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index e946bd2087d8..0188f877cab2 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -64,8 +64,6 @@ static void mock_device_release(struct drm_device *dev)
mock_device_flush(i915);
intel_gt_driver_remove(&i915->gt);
- i915_gem_driver_release__contexts(i915);
-
i915_gem_drain_workqueue(i915);
i915_gem_drain_freed_objects(i915);
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 979d96f27c43..3c6021415274 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -15,21 +15,16 @@ static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
.release = i915_gem_object_release_memory_region,
};
-static struct drm_i915_gem_object *
-mock_object_create(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+static int mock_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
if (size > mem->mm.size)
- return ERR_PTR(-E2BIG);
-
- obj = i915_gem_object_alloc();
- if (!obj)
- return ERR_PTR(-ENOMEM);
+ return -E2BIG;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class);
@@ -40,13 +35,13 @@ mock_object_create(struct intel_memory_region *mem,
i915_gem_object_init_memory_region(obj, mem, flags);
- return obj;
+ return 0;
}
static const struct intel_memory_region_ops mock_region_ops = {
.init = intel_memory_region_init_buddy,
.release = intel_memory_region_release_buddy,
- .create_object = mock_object_create,
+ .init_object = mock_object_init,
};
struct intel_memory_region *
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 6231048aa5aa..b5fa0e45a839 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -28,6 +28,7 @@ config DRM_IMX_TVE
config DRM_IMX_LDB
tristate "Support for LVDS displays"
depends on DRM_IMX && MFD_SYSCON
+ depends on COMMON_CLK
select DRM_PANEL
help
Choose this to enable the internal LVDS Display Bridge (LDB)
@@ -36,7 +37,7 @@ config DRM_IMX_LDB
config DRM_IMX_HDMI
tristate "Freescale i.MX DRM HDMI"
select DRM_DW_HDMI
- depends on DRM_IMX
+ depends on DRM_IMX && OF
help
Choose this if you want to use HDMI on i.MX6.
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index d07b39b8afd2..87428fb23d9f 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -15,23 +15,32 @@
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_simple_kms_helper.h>
#include "imx-drm.h"
+struct imx_hdmi;
+
+struct imx_hdmi_encoder {
+ struct drm_encoder encoder;
+ struct imx_hdmi *hdmi;
+};
+
struct imx_hdmi {
struct device *dev;
- struct drm_encoder encoder;
+ struct drm_bridge *bridge;
struct dw_hdmi *hdmi;
struct regmap *regmap;
};
static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e)
{
- return container_of(e, struct imx_hdmi, encoder);
+ return container_of(e, struct imx_hdmi_encoder, encoder)->hdmi;
}
static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = {
@@ -98,19 +107,6 @@ static const struct dw_hdmi_phy_config imx_phy_config[] = {
{ ~0UL, 0x0000, 0x0000, 0x0000}
};
-static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi)
-{
- struct device_node *np = hdmi->dev->of_node;
-
- hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
- if (IS_ERR(hdmi->regmap)) {
- dev_err(hdmi->dev, "Unable to get gpr\n");
- return PTR_ERR(hdmi->regmap);
- }
-
- return 0;
-}
-
static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
{
struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
@@ -195,65 +191,36 @@ MODULE_DEVICE_TABLE(of, dw_hdmi_imx_dt_ids);
static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
- const struct dw_hdmi_plat_data *plat_data;
- const struct of_device_id *match;
struct drm_device *drm = data;
+ struct imx_hdmi_encoder *hdmi_encoder;
struct drm_encoder *encoder;
- struct imx_hdmi *hdmi;
int ret;
- if (!pdev->dev.of_node)
- return -ENODEV;
+ hdmi_encoder = drmm_simple_encoder_alloc(drm, struct imx_hdmi_encoder,
+ encoder, DRM_MODE_ENCODER_TMDS);
+ if (IS_ERR(hdmi_encoder))
+ return PTR_ERR(hdmi_encoder);
- hdmi = dev_get_drvdata(dev);
- memset(hdmi, 0, sizeof(*hdmi));
-
- match = of_match_node(dw_hdmi_imx_dt_ids, pdev->dev.of_node);
- plat_data = match->data;
- hdmi->dev = &pdev->dev;
- encoder = &hdmi->encoder;
+ hdmi_encoder->hdmi = dev_get_drvdata(dev);
+ encoder = &hdmi_encoder->encoder;
ret = imx_drm_encoder_parse_of(drm, encoder, dev->of_node);
if (ret)
return ret;
- ret = dw_hdmi_imx_parse_dt(hdmi);
- if (ret < 0)
- return ret;
-
drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs);
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
-
- hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data);
- /*
- * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
- * which would have called the encoder cleanup. Do it manually.
- */
- if (IS_ERR(hdmi->hdmi)) {
- ret = PTR_ERR(hdmi->hdmi);
- drm_encoder_cleanup(encoder);
- }
-
- return ret;
-}
-
-static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct imx_hdmi *hdmi = dev_get_drvdata(dev);
-
- dw_hdmi_unbind(hdmi->hdmi);
+ return drm_bridge_attach(encoder, hdmi_encoder->hdmi->bridge, NULL, 0);
}
static const struct component_ops dw_hdmi_imx_ops = {
.bind = dw_hdmi_imx_bind,
- .unbind = dw_hdmi_imx_unbind,
};
static int dw_hdmi_imx_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np);
struct imx_hdmi *hdmi;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
@@ -261,13 +228,33 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, hdmi);
+ hdmi->dev = &pdev->dev;
+
+ hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
+ if (IS_ERR(hdmi->regmap)) {
+ dev_err(hdmi->dev, "Unable to get gpr\n");
+ return PTR_ERR(hdmi->regmap);
+ }
+
+ hdmi->hdmi = dw_hdmi_probe(pdev, match->data);
+ if (IS_ERR(hdmi->hdmi))
+ return PTR_ERR(hdmi->hdmi);
+
+ hdmi->bridge = of_drm_find_bridge(np);
+ if (!hdmi->bridge) {
+ dev_err(hdmi->dev, "Unable to find bridge\n");
+ return -ENODEV;
+ }
return component_add(&pdev->dev, &dw_hdmi_imx_ops);
}
static int dw_hdmi_imx_remove(struct platform_device *pdev)
{
+ struct imx_hdmi *hdmi = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &dw_hdmi_imx_ops);
+ dw_hdmi_remove(hdmi->hdmi);
return 0;
}
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 41e2978cb1eb..dbfe39e2f7f6 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -22,6 +22,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
@@ -47,12 +48,18 @@
#define LDB_DI1_VS_POL_ACT_LOW (1 << 10)
#define LDB_BGREF_RMODE_INT (1 << 15)
+struct imx_ldb_channel;
+
+struct imx_ldb_encoder {
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+ struct imx_ldb_channel *channel;
+};
+
struct imx_ldb;
struct imx_ldb_channel {
struct imx_ldb *ldb;
- struct drm_connector connector;
- struct drm_encoder encoder;
/* Defines what is connected to the ldb, only one at a time */
struct drm_panel *panel;
@@ -70,12 +77,12 @@ struct imx_ldb_channel {
static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c)
{
- return container_of(c, struct imx_ldb_channel, connector);
+ return container_of(c, struct imx_ldb_encoder, connector)->channel;
}
static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e)
{
- return container_of(e, struct imx_ldb_channel, encoder);
+ return container_of(e, struct imx_ldb_encoder, encoder)->channel;
}
struct bus_mux {
@@ -411,9 +418,20 @@ static int imx_ldb_register(struct drm_device *drm,
struct imx_ldb_channel *imx_ldb_ch)
{
struct imx_ldb *ldb = imx_ldb_ch->ldb;
- struct drm_encoder *encoder = &imx_ldb_ch->encoder;
+ struct imx_ldb_encoder *ldb_encoder;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
int ret;
+ ldb_encoder = drmm_simple_encoder_alloc(drm, struct imx_ldb_encoder,
+ encoder, DRM_MODE_ENCODER_LVDS);
+ if (IS_ERR(ldb_encoder))
+ return PTR_ERR(ldb_encoder);
+
+ ldb_encoder->channel = imx_ldb_ch;
+ connector = &ldb_encoder->connector;
+ encoder = &ldb_encoder->encoder;
+
ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child);
if (ret)
return ret;
@@ -429,11 +447,9 @@ static int imx_ldb_register(struct drm_device *drm,
}
drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_LVDS);
if (imx_ldb_ch->bridge) {
- ret = drm_bridge_attach(&imx_ldb_ch->encoder,
- imx_ldb_ch->bridge, NULL, 0);
+ ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL, 0);
if (ret) {
DRM_ERROR("Failed to initialize bridge with drm\n");
return ret;
@@ -445,13 +461,13 @@ static int imx_ldb_register(struct drm_device *drm,
* historical reasons, the ldb driver can also work without
* a panel.
*/
- drm_connector_helper_add(&imx_ldb_ch->connector,
- &imx_ldb_connector_helper_funcs);
- drm_connector_init_with_ddc(drm, &imx_ldb_ch->connector,
+ drm_connector_helper_add(connector,
+ &imx_ldb_connector_helper_funcs);
+ drm_connector_init_with_ddc(drm, connector,
&imx_ldb_connector_funcs,
DRM_MODE_CONNECTOR_LVDS,
imx_ldb_ch->ddc);
- drm_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
}
return 0;
@@ -559,17 +575,42 @@ static int imx_ldb_panel_ddc(struct device *dev,
static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = data;
+ struct imx_ldb *imx_ldb = dev_get_drvdata(dev);
+ int ret;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct imx_ldb_channel *channel = &imx_ldb->channel[i];
+
+ if (!channel->ldb)
+ break;
+
+ ret = imx_ldb_register(drm, channel);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct component_ops imx_ldb_ops = {
+ .bind = imx_ldb_bind,
+};
+
+static int imx_ldb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- const struct of_device_id *of_id =
- of_match_device(imx_ldb_dt_ids, dev);
+ const struct of_device_id *of_id = of_match_device(imx_ldb_dt_ids, dev);
struct device_node *child;
struct imx_ldb *imx_ldb;
int dual;
int ret;
int i;
- imx_ldb = dev_get_drvdata(dev);
- memset(imx_ldb, 0, sizeof(*imx_ldb));
+ imx_ldb = devm_kzalloc(dev, sizeof(*imx_ldb), GFP_KERNEL);
+ if (!imx_ldb)
+ return -ENOMEM;
imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
if (IS_ERR(imx_ldb->regmap)) {
@@ -669,25 +710,20 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
}
channel->bus_format = bus_format;
channel->child = child;
-
- ret = imx_ldb_register(drm, channel);
- if (ret) {
- channel->child = NULL;
- goto free_child;
- }
}
- return 0;
+ platform_set_drvdata(pdev, imx_ldb);
+
+ return component_add(&pdev->dev, &imx_ldb_ops);
free_child:
of_node_put(child);
return ret;
}
-static void imx_ldb_unbind(struct device *dev, struct device *master,
- void *data)
+static int imx_ldb_remove(struct platform_device *pdev)
{
- struct imx_ldb *imx_ldb = dev_get_drvdata(dev);
+ struct imx_ldb *imx_ldb = platform_get_drvdata(pdev);
int i;
for (i = 0; i < 2; i++) {
@@ -696,28 +732,7 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
kfree(channel->edid);
i2c_put_adapter(channel->ddc);
}
-}
-
-static const struct component_ops imx_ldb_ops = {
- .bind = imx_ldb_bind,
- .unbind = imx_ldb_unbind,
-};
-
-static int imx_ldb_probe(struct platform_device *pdev)
-{
- struct imx_ldb *imx_ldb;
-
- imx_ldb = devm_kzalloc(&pdev->dev, sizeof(*imx_ldb), GFP_KERNEL);
- if (!imx_ldb)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, imx_ldb);
-
- return component_add(&pdev->dev, &imx_ldb_ops);
-}
-static int imx_ldb_remove(struct platform_device *pdev)
-{
component_del(&pdev->dev, &imx_ldb_ops);
return 0;
}
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 2a8d2e32e7b4..bc8c3f802a15 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -19,6 +19,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -99,9 +100,13 @@ enum {
TVE_MODE_VGA,
};
-struct imx_tve {
+struct imx_tve_encoder {
struct drm_connector connector;
struct drm_encoder encoder;
+ struct imx_tve *tve;
+};
+
+struct imx_tve {
struct device *dev;
int mode;
int di_hsync_pin;
@@ -118,12 +123,12 @@ struct imx_tve {
static inline struct imx_tve *con_to_tve(struct drm_connector *c)
{
- return container_of(c, struct imx_tve, connector);
+ return container_of(c, struct imx_tve_encoder, connector)->tve;
}
static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
{
- return container_of(e, struct imx_tve, encoder);
+ return container_of(e, struct imx_tve_encoder, encoder)->tve;
}
static void tve_enable(struct imx_tve *tve)
@@ -418,7 +423,7 @@ static int tve_clk_init(struct imx_tve *tve, void __iomem *base)
init.parent_names = (const char **)&tve_di_parent;
tve->clk_hw_di.init = &init;
- tve->di_clk = clk_register(tve->dev, &tve->clk_hw_di);
+ tve->di_clk = devm_clk_register(tve->dev, &tve->clk_hw_di);
if (IS_ERR(tve->di_clk)) {
dev_err(tve->dev, "failed to register TVE output clock: %ld\n",
PTR_ERR(tve->di_clk));
@@ -428,33 +433,6 @@ static int tve_clk_init(struct imx_tve *tve, void __iomem *base)
return 0;
}
-static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
-{
- int encoder_type;
- int ret;
-
- encoder_type = tve->mode == TVE_MODE_VGA ?
- DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
-
- ret = imx_drm_encoder_parse_of(drm, &tve->encoder, tve->dev->of_node);
- if (ret)
- return ret;
-
- drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
- drm_simple_encoder_init(drm, &tve->encoder, encoder_type);
-
- drm_connector_helper_add(&tve->connector,
- &imx_tve_connector_helper_funcs);
- drm_connector_init_with_ddc(drm, &tve->connector,
- &imx_tve_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- tve->ddc);
-
- drm_connector_attach_encoder(&tve->connector, &tve->encoder);
-
- return 0;
-}
-
static void imx_tve_disable_regulator(void *data)
{
struct imx_tve *tve = data;
@@ -502,8 +480,49 @@ static int of_get_tve_mode(struct device_node *np)
static int imx_tve_bind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
+ struct imx_tve *tve = dev_get_drvdata(dev);
+ struct imx_tve_encoder *tvee;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int encoder_type;
+ int ret;
+
+ encoder_type = tve->mode == TVE_MODE_VGA ?
+ DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
+
+ tvee = drmm_simple_encoder_alloc(drm, struct imx_tve_encoder, encoder,
+ encoder_type);
+ if (IS_ERR(tvee))
+ return PTR_ERR(tvee);
+
+ tvee->tve = tve;
+ encoder = &tvee->encoder;
+ connector = &tvee->connector;
+
+ ret = imx_drm_encoder_parse_of(drm, encoder, tve->dev->of_node);
+ if (ret)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &imx_tve_encoder_helper_funcs);
+
+ drm_connector_helper_add(connector, &imx_tve_connector_helper_funcs);
+ ret = drm_connector_init_with_ddc(drm, connector,
+ &imx_tve_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA, tve->ddc);
+ if (ret)
+ return ret;
+
+ return drm_connector_attach_encoder(connector, encoder);
+}
+
+static const struct component_ops imx_tve_ops = {
+ .bind = imx_tve_bind,
+};
+
+static int imx_tve_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *ddc_node;
struct imx_tve *tve;
@@ -513,8 +532,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
int irq;
int ret;
- tve = dev_get_drvdata(dev);
- memset(tve, 0, sizeof(*tve));
+ tve = devm_kzalloc(dev, sizeof(*tve), GFP_KERNEL);
+ if (!tve)
+ return -ENOMEM;
tve->dev = dev;
@@ -621,28 +641,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- ret = imx_tve_register(drm, tve);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static const struct component_ops imx_tve_ops = {
- .bind = imx_tve_bind,
-};
-
-static int imx_tve_probe(struct platform_device *pdev)
-{
- struct imx_tve *tve;
-
- tve = devm_kzalloc(&pdev->dev, sizeof(*tve), GFP_KERNEL);
- if (!tve)
- return -ENOMEM;
-
platform_set_drvdata(pdev, tve);
- return component_add(&pdev->dev, &imx_tve_ops);
+ return component_add(dev, &imx_tve_ops);
}
static int imx_tve_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7ebd99ee3240..e6431a227feb 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -20,6 +20,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -163,7 +164,6 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
static const struct drm_crtc_funcs ipu_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = drm_crtc_cleanup,
.page_flip = drm_atomic_helper_page_flip,
.reset = imx_drm_crtc_reset,
.atomic_duplicate_state = imx_drm_crtc_duplicate_state,
@@ -322,73 +322,73 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
.atomic_enable = ipu_crtc_atomic_enable,
};
-static void ipu_put_resources(struct ipu_crtc *ipu_crtc)
+static void ipu_put_resources(struct drm_device *dev, void *ptr)
{
+ struct ipu_crtc *ipu_crtc = ptr;
+
if (!IS_ERR_OR_NULL(ipu_crtc->dc))
ipu_dc_put(ipu_crtc->dc);
if (!IS_ERR_OR_NULL(ipu_crtc->di))
ipu_di_put(ipu_crtc->di);
}
-static int ipu_get_resources(struct ipu_crtc *ipu_crtc,
- struct ipu_client_platformdata *pdata)
+static int ipu_get_resources(struct drm_device *dev, struct ipu_crtc *ipu_crtc,
+ struct ipu_client_platformdata *pdata)
{
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
int ret;
ipu_crtc->dc = ipu_dc_get(ipu, pdata->dc);
- if (IS_ERR(ipu_crtc->dc)) {
- ret = PTR_ERR(ipu_crtc->dc);
- goto err_out;
- }
+ if (IS_ERR(ipu_crtc->dc))
+ return PTR_ERR(ipu_crtc->dc);
+
+ ret = drmm_add_action_or_reset(dev, ipu_put_resources, ipu_crtc);
+ if (ret)
+ return ret;
ipu_crtc->di = ipu_di_get(ipu, pdata->di);
- if (IS_ERR(ipu_crtc->di)) {
- ret = PTR_ERR(ipu_crtc->di);
- goto err_out;
- }
+ if (IS_ERR(ipu_crtc->di))
+ return PTR_ERR(ipu_crtc->di);
return 0;
-err_out:
- ipu_put_resources(ipu_crtc);
-
- return ret;
}
-static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
- struct ipu_client_platformdata *pdata, struct drm_device *drm)
+static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
{
- struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
- struct drm_crtc *crtc = &ipu_crtc->base;
+ struct ipu_client_platformdata *pdata = dev->platform_data;
+ struct ipu_soc *ipu = dev_get_drvdata(dev->parent);
+ struct drm_device *drm = data;
+ struct ipu_plane *primary_plane;
+ struct ipu_crtc *ipu_crtc;
+ struct drm_crtc *crtc;
int dp = -EINVAL;
int ret;
- ret = ipu_get_resources(ipu_crtc, pdata);
- if (ret) {
- dev_err(ipu_crtc->dev, "getting resources failed with %d.\n",
- ret);
- return ret;
- }
-
if (pdata->dp >= 0)
dp = IPU_DP_FLOW_SYNC_BG;
- ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
- DRM_PLANE_TYPE_PRIMARY);
- if (IS_ERR(ipu_crtc->plane[0])) {
- ret = PTR_ERR(ipu_crtc->plane[0]);
- goto err_put_resources;
- }
+ primary_plane = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
+ DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(primary_plane))
+ return PTR_ERR(primary_plane);
+
+ ipu_crtc = drmm_crtc_alloc_with_planes(drm, struct ipu_crtc, base,
+ &primary_plane->base, NULL,
+ &ipu_crtc_funcs, NULL);
+ if (IS_ERR(ipu_crtc))
+ return PTR_ERR(ipu_crtc);
+ ipu_crtc->dev = dev;
+ ipu_crtc->plane[0] = primary_plane;
+
+ crtc = &ipu_crtc->base;
crtc->port = pdata->of_node;
drm_crtc_helper_add(crtc, &ipu_helper_funcs);
- drm_crtc_init_with_planes(drm, crtc, &ipu_crtc->plane[0]->base, NULL,
- &ipu_crtc_funcs, NULL);
- ret = ipu_plane_get_resources(ipu_crtc->plane[0]);
+ ret = ipu_get_resources(drm, ipu_crtc, pdata);
if (ret) {
- dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n",
+ dev_err(ipu_crtc->dev, "getting resources failed with %d.\n",
ret);
- goto err_put_resources;
+ return ret;
}
/* If this crtc is using the DP, add an overlay plane */
@@ -397,16 +397,8 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
IPU_DP_FLOW_SYNC_FG,
drm_crtc_mask(&ipu_crtc->base),
DRM_PLANE_TYPE_OVERLAY);
- if (IS_ERR(ipu_crtc->plane[1])) {
+ if (IS_ERR(ipu_crtc->plane[1]))
ipu_crtc->plane[1] = NULL;
- } else {
- ret = ipu_plane_get_resources(ipu_crtc->plane[1]);
- if (ret) {
- dev_err(ipu_crtc->dev, "getting plane 1 "
- "resources failed with %d.\n", ret);
- goto err_put_plane0_res;
- }
- }
}
ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
@@ -414,58 +406,21 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
"imx_drm", ipu_crtc);
if (ret < 0) {
dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
- goto err_put_plane1_res;
+ return ret;
}
/* Only enable IRQ when we actually need it to trigger work. */
disable_irq(ipu_crtc->irq);
return 0;
-
-err_put_plane1_res:
- if (ipu_crtc->plane[1])
- ipu_plane_put_resources(ipu_crtc->plane[1]);
-err_put_plane0_res:
- ipu_plane_put_resources(ipu_crtc->plane[0]);
-err_put_resources:
- ipu_put_resources(ipu_crtc);
-
- return ret;
-}
-
-static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
-{
- struct ipu_client_platformdata *pdata = dev->platform_data;
- struct drm_device *drm = data;
- struct ipu_crtc *ipu_crtc;
-
- ipu_crtc = dev_get_drvdata(dev);
- memset(ipu_crtc, 0, sizeof(*ipu_crtc));
-
- ipu_crtc->dev = dev;
-
- return ipu_crtc_init(ipu_crtc, pdata, drm);
-}
-
-static void ipu_drm_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct ipu_crtc *ipu_crtc = dev_get_drvdata(dev);
-
- ipu_put_resources(ipu_crtc);
- if (ipu_crtc->plane[1])
- ipu_plane_put_resources(ipu_crtc->plane[1]);
- ipu_plane_put_resources(ipu_crtc->plane[0]);
}
static const struct component_ops ipu_crtc_ops = {
.bind = ipu_drm_bind,
- .unbind = ipu_drm_unbind,
};
static int ipu_drm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct ipu_crtc *ipu_crtc;
int ret;
if (!dev->platform_data)
@@ -475,12 +430,6 @@ static int ipu_drm_probe(struct platform_device *pdev)
if (ret)
return ret;
- ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL);
- if (!ipu_crtc)
- return -ENOMEM;
-
- dev_set_drvdata(dev, ipu_crtc);
-
return component_add(dev, &ipu_crtc_ops);
}
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 8a4235d9d9f1..075508051b5f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -11,6 +11,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_plane_helper.h>
#include <video/imx-ipu-v3.h>
@@ -142,8 +143,10 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
fb->format->cpp[2] * x - eba;
}
-void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
+static void ipu_plane_put_resources(struct drm_device *dev, void *ptr)
{
+ struct ipu_plane *ipu_plane = ptr;
+
if (!IS_ERR_OR_NULL(ipu_plane->dp))
ipu_dp_put(ipu_plane->dp);
if (!IS_ERR_OR_NULL(ipu_plane->dmfc))
@@ -154,7 +157,8 @@ void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
ipu_idmac_put(ipu_plane->alpha_ch);
}
-int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
+static int ipu_plane_get_resources(struct drm_device *dev,
+ struct ipu_plane *ipu_plane)
{
int ret;
int alpha_ch;
@@ -166,6 +170,10 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
return ret;
}
+ ret = drmm_add_action_or_reset(dev, ipu_plane_put_resources, ipu_plane);
+ if (ret)
+ return ret;
+
alpha_ch = ipu_channel_alpha_channel(ipu_plane->dma);
if (alpha_ch >= 0) {
ipu_plane->alpha_ch = ipu_idmac_get(ipu_plane->ipu, alpha_ch);
@@ -181,7 +189,7 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
if (IS_ERR(ipu_plane->dmfc)) {
ret = PTR_ERR(ipu_plane->dmfc);
DRM_ERROR("failed to get dmfc: ret %d\n", ret);
- goto err_out;
+ return ret;
}
if (ipu_plane->dp_flow >= 0) {
@@ -189,15 +197,11 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
if (IS_ERR(ipu_plane->dp)) {
ret = PTR_ERR(ipu_plane->dp);
DRM_ERROR("failed to get dp flow: %d\n", ret);
- goto err_out;
+ return ret;
}
}
return 0;
-err_out:
- ipu_plane_put_resources(ipu_plane);
-
- return ret;
}
static bool ipu_plane_separate_alpha(struct ipu_plane *ipu_plane)
@@ -262,16 +266,6 @@ void ipu_plane_disable_deferred(struct drm_plane *plane)
}
EXPORT_SYMBOL_GPL(ipu_plane_disable_deferred);
-static void ipu_plane_destroy(struct drm_plane *plane)
-{
- struct ipu_plane *ipu_plane = to_ipu_plane(plane);
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- drm_plane_cleanup(plane);
- kfree(ipu_plane);
-}
-
static void ipu_plane_state_reset(struct drm_plane *plane)
{
unsigned int zpos = (plane->type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1;
@@ -336,7 +330,6 @@ static bool ipu_plane_format_mod_supported(struct drm_plane *plane,
static const struct drm_plane_funcs ipu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = ipu_plane_destroy,
.reset = ipu_plane_state_reset,
.atomic_duplicate_state = ipu_plane_duplicate_state,
.atomic_destroy_state = ipu_plane_destroy_state,
@@ -834,10 +827,15 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n",
dma, dp, possible_crtcs);
- ipu_plane = kzalloc(sizeof(*ipu_plane), GFP_KERNEL);
- if (!ipu_plane) {
- DRM_ERROR("failed to allocate plane\n");
- return ERR_PTR(-ENOMEM);
+ ipu_plane = drmm_universal_plane_alloc(dev, struct ipu_plane, base,
+ possible_crtcs, &ipu_plane_funcs,
+ ipu_plane_formats,
+ ARRAY_SIZE(ipu_plane_formats),
+ modifiers, type, NULL);
+ if (IS_ERR(ipu_plane)) {
+ DRM_ERROR("failed to allocate and initialize %s plane\n",
+ zpos ? "overlay" : "primary");
+ return ipu_plane;
}
ipu_plane->ipu = ipu;
@@ -847,22 +845,23 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
if (ipu_prg_present(ipu))
modifiers = pre_format_modifiers;
- ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs,
- &ipu_plane_funcs, ipu_plane_formats,
- ARRAY_SIZE(ipu_plane_formats),
- modifiers, type, NULL);
- if (ret) {
- DRM_ERROR("failed to initialize plane\n");
- kfree(ipu_plane);
- return ERR_PTR(ret);
- }
-
drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG)
- drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0, 1);
+ ret = drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0,
+ 1);
else
- drm_plane_create_zpos_immutable_property(&ipu_plane->base, 0);
+ ret = drm_plane_create_zpos_immutable_property(&ipu_plane->base,
+ 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = ipu_plane_get_resources(dev, ipu_plane);
+ if (ret) {
+ DRM_ERROR("failed to get %s plane resources: %pe\n",
+ zpos ? "overlay" : "primary", &ret);
+ return ERR_PTR(ret);
+ }
return ipu_plane;
}
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index ffacbcdd2f98..6d544e6ce63f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -41,9 +41,6 @@ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
uint32_t src_x, uint32_t src_y, uint32_t src_w,
uint32_t src_h, bool interlaced);
-int ipu_plane_get_resources(struct ipu_plane *plane);
-void ipu_plane_put_resources(struct ipu_plane *plane);
-
int ipu_plane_irq(struct ipu_plane *plane);
void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 2eb8df4697df..e0412e694fd9 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -15,6 +15,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
@@ -22,10 +23,14 @@
#include "imx-drm.h"
-struct imx_parallel_display {
+struct imx_parallel_display_encoder {
struct drm_connector connector;
struct drm_encoder encoder;
struct drm_bridge bridge;
+ struct imx_parallel_display *pd;
+};
+
+struct imx_parallel_display {
struct device *dev;
void *edid;
u32 bus_format;
@@ -37,12 +42,12 @@ struct imx_parallel_display {
static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
{
- return container_of(c, struct imx_parallel_display, connector);
+ return container_of(c, struct imx_parallel_display_encoder, connector)->pd;
}
static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
{
- return container_of(b, struct imx_parallel_display, bridge);
+ return container_of(b, struct imx_parallel_display_encoder, bridge)->pd;
}
static int imx_pd_connector_get_modes(struct drm_connector *connector)
@@ -74,7 +79,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
return ret;
drm_mode_copy(mode, &imxpd->mode);
- mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
num_modes++;
}
@@ -253,12 +258,26 @@ static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
.atomic_get_output_bus_fmts = imx_pd_bridge_atomic_get_output_bus_fmts,
};
-static int imx_pd_register(struct drm_device *drm,
- struct imx_parallel_display *imxpd)
+static int imx_pd_bind(struct device *dev, struct device *master, void *data)
{
- struct drm_encoder *encoder = &imxpd->encoder;
+ struct drm_device *drm = data;
+ struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
+ struct imx_parallel_display_encoder *imxpd_encoder;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
int ret;
+ imxpd_encoder = drmm_simple_encoder_alloc(drm, struct imx_parallel_display_encoder,
+ encoder, DRM_MODE_ENCODER_NONE);
+ if (IS_ERR(imxpd_encoder))
+ return PTR_ERR(imxpd_encoder);
+
+ imxpd_encoder->pd = imxpd;
+ connector = &imxpd_encoder->connector;
+ encoder = &imxpd_encoder->encoder;
+ bridge = &imxpd_encoder->bridge;
+
ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
if (ret)
return ret;
@@ -268,39 +287,37 @@ static int imx_pd_register(struct drm_device *drm,
* immediately since the current state is ON
* at this point.
*/
- imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
-
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
-
- imxpd->bridge.funcs = &imx_pd_bridge_funcs;
- drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
+ connector->dpms = DRM_MODE_DPMS_OFF;
- if (!imxpd->next_bridge) {
- drm_connector_helper_add(&imxpd->connector,
- &imx_pd_connector_helper_funcs);
- drm_connector_init(drm, &imxpd->connector,
- &imx_pd_connector_funcs,
- DRM_MODE_CONNECTOR_DPI);
- }
+ bridge->funcs = &imx_pd_bridge_funcs;
+ drm_bridge_attach(encoder, bridge, NULL, 0);
if (imxpd->next_bridge) {
- ret = drm_bridge_attach(encoder, imxpd->next_bridge,
- &imxpd->bridge, 0);
+ ret = drm_bridge_attach(encoder, imxpd->next_bridge, bridge, 0);
if (ret < 0) {
dev_err(imxpd->dev, "failed to attach bridge: %d\n",
ret);
return ret;
}
} else {
- drm_connector_attach_encoder(&imxpd->connector, encoder);
+ drm_connector_helper_add(connector,
+ &imx_pd_connector_helper_funcs);
+ drm_connector_init(drm, connector, &imx_pd_connector_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ drm_connector_attach_encoder(connector, encoder);
}
return 0;
}
-static int imx_pd_bind(struct device *dev, struct device *master, void *data)
+static const struct component_ops imx_pd_ops = {
+ .bind = imx_pd_bind,
+};
+
+static int imx_pd_probe(struct platform_device *pdev)
{
- struct drm_device *drm = data;
+ struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const u8 *edidp;
struct imx_parallel_display *imxpd;
@@ -309,8 +326,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
u32 bus_format = 0;
const char *fmt;
- imxpd = dev_get_drvdata(dev);
- memset(imxpd, 0, sizeof(*imxpd));
+ imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
+ if (!imxpd)
+ return -ENOMEM;
/* port@1 is the output port */
ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel,
@@ -337,28 +355,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
imxpd->dev = dev;
- ret = imx_pd_register(drm, imxpd);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static const struct component_ops imx_pd_ops = {
- .bind = imx_pd_bind,
-};
-
-static int imx_pd_probe(struct platform_device *pdev)
-{
- struct imx_parallel_display *imxpd;
-
- imxpd = devm_kzalloc(&pdev->dev, sizeof(*imxpd), GFP_KERNEL);
- if (!imxpd)
- return -ENOMEM;
-
platform_set_drvdata(pdev, imxpd);
- return component_add(&pdev->dev, &imx_pd_ops);
+ return component_add(dev, &imx_pd_ops);
}
static int imx_pd_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index 477d5387e43e..3b57f8be007c 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -4,6 +4,7 @@ config DRM_INGENIC
depends on DRM
depends on CMA
depends on OF
+ depends on COMMON_CLK
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 368bfef8b340..7bb31fbee29d 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -14,6 +14,7 @@
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/regmap.h>
#include <drm/drm_atomic.h>
@@ -190,15 +191,15 @@ static void ingenic_drm_crtc_update_timings(struct ingenic_drm *priv,
{
unsigned int vpe, vds, vde, vt, hpe, hds, hde, ht;
- vpe = mode->vsync_end - mode->vsync_start;
- vds = mode->vtotal - mode->vsync_start;
- vde = vds + mode->vdisplay;
- vt = vde + mode->vsync_start - mode->vdisplay;
+ vpe = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vds = mode->crtc_vtotal - mode->crtc_vsync_start;
+ vde = vds + mode->crtc_vdisplay;
+ vt = vde + mode->crtc_vsync_start - mode->crtc_vdisplay;
- hpe = mode->hsync_end - mode->hsync_start;
- hds = mode->htotal - mode->hsync_start;
- hde = hds + mode->hdisplay;
- ht = hde + mode->hsync_start - mode->hdisplay;
+ hpe = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ hds = mode->crtc_htotal - mode->crtc_hsync_start;
+ hde = hds + mode->crtc_hdisplay;
+ ht = hde + mode->crtc_hsync_start - mode->crtc_hdisplay;
regmap_write(priv->map, JZ_REG_LCD_VSYNC,
0 << JZ_LCD_VSYNC_VPS_OFFSET |
@@ -333,7 +334,7 @@ static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_pending_vblank_event *event = crtc_state->event;
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
- ingenic_drm_crtc_update_timings(priv, &crtc_state->mode);
+ ingenic_drm_crtc_update_timings(priv, &crtc_state->adjusted_mode);
priv->update_clk_rate = true;
}
@@ -589,7 +590,7 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct drm_connector *conn = conn_state->connector;
struct drm_display_info *info = &conn->display_info;
- unsigned int cfg;
+ unsigned int cfg, rgbcfg = 0;
priv->panel_is_sharp = info->bus_flags & DRM_BUS_FLAG_SHARP_SIGNALS;
@@ -626,6 +627,9 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
case MEDIA_BUS_FMT_RGB888_1X24:
cfg |= JZ_LCD_CFG_MODE_GENERIC_24BIT;
break;
+ case MEDIA_BUS_FMT_RGB888_3X8_DELTA:
+ rgbcfg = JZ_LCD_RGBC_EVEN_GBR | JZ_LCD_RGBC_ODD_RGB;
+ fallthrough;
case MEDIA_BUS_FMT_RGB888_3X8:
cfg |= JZ_LCD_CFG_MODE_8BIT_SERIAL;
break;
@@ -636,6 +640,7 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
}
regmap_write(priv->map, JZ_REG_LCD_CFG, cfg);
+ regmap_write(priv->map, JZ_REG_LCD_RGBC, rgbcfg);
}
static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
@@ -643,6 +648,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_display_info *info = &conn_state->connector->display_info;
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
if (info->num_bus_formats != 1)
return -EINVAL;
@@ -651,10 +657,23 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
return 0;
switch (*info->bus_formats) {
+ case MEDIA_BUS_FMT_RGB888_3X8:
+ case MEDIA_BUS_FMT_RGB888_3X8_DELTA:
+ /*
+ * The LCD controller expects timing values in dot-clock ticks,
+ * which is 3x the timing values in pixels when using a 3x8-bit
+ * display; but it will count the display area size in pixels
+ * either way. Go figure.
+ */
+ mode->crtc_clock = mode->clock * 3;
+ mode->crtc_hsync_start = mode->hsync_start * 3 - mode->hdisplay * 2;
+ mode->crtc_hsync_end = mode->hsync_end * 3 - mode->hdisplay * 2;
+ mode->crtc_hdisplay = mode->hdisplay;
+ mode->crtc_htotal = mode->htotal * 3 - mode->hdisplay * 2;
+ return 0;
case MEDIA_BUS_FMT_RGB565_1X16:
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_RGB888_1X24:
- case MEDIA_BUS_FMT_RGB888_3X8:
return 0;
default:
return -EINVAL;
@@ -755,8 +774,6 @@ static const struct drm_crtc_funcs ingenic_drm_crtc_funcs = {
.enable_vblank = ingenic_drm_enable_vblank,
.disable_vblank = ingenic_drm_disable_vblank,
-
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
static const struct drm_plane_helper_funcs ingenic_drm_plane_helper_funcs = {
@@ -1167,6 +1184,22 @@ static int ingenic_drm_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused ingenic_drm_suspend(struct device *dev)
+{
+ struct ingenic_drm *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_suspend(&priv->drm);
+}
+
+static int __maybe_unused ingenic_drm_resume(struct device *dev)
+{
+ struct ingenic_drm *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_resume(&priv->drm);
+}
+
+static SIMPLE_DEV_PM_OPS(ingenic_drm_pm_ops, ingenic_drm_suspend, ingenic_drm_resume);
+
static const u32 jz4740_formats[] = {
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
@@ -1246,6 +1279,7 @@ MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
static struct platform_driver ingenic_drm_driver = {
.driver = {
.name = "ingenic-drm",
+ .pm = pm_ptr(&ingenic_drm_pm_ops),
.of_match_table = of_match_ptr(ingenic_drm_of_match),
},
.probe = ingenic_drm_probe,
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.h b/drivers/gpu/drm/ingenic/ingenic-drm.h
index 9b48ce02803d..1b4347f7f084 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.h
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.h
@@ -31,6 +31,7 @@
#define JZ_REG_LCD_SA1 0x54
#define JZ_REG_LCD_FID1 0x58
#define JZ_REG_LCD_CMD1 0x5C
+#define JZ_REG_LCD_RGBC 0x90
#define JZ_REG_LCD_OSDC 0x100
#define JZ_REG_LCD_OSDCTRL 0x104
#define JZ_REG_LCD_OSDS 0x108
@@ -138,6 +139,19 @@
#define JZ_LCD_STATE_SOF_IRQ BIT(4)
#define JZ_LCD_STATE_DISABLED BIT(0)
+#define JZ_LCD_RGBC_ODD_RGB (0x0 << 4)
+#define JZ_LCD_RGBC_ODD_RBG (0x1 << 4)
+#define JZ_LCD_RGBC_ODD_GRB (0x2 << 4)
+#define JZ_LCD_RGBC_ODD_GBR (0x3 << 4)
+#define JZ_LCD_RGBC_ODD_BRG (0x4 << 4)
+#define JZ_LCD_RGBC_ODD_BGR (0x5 << 4)
+#define JZ_LCD_RGBC_EVEN_RGB (0x0 << 0)
+#define JZ_LCD_RGBC_EVEN_RBG (0x1 << 0)
+#define JZ_LCD_RGBC_EVEN_GRB (0x2 << 0)
+#define JZ_LCD_RGBC_EVEN_GBR (0x3 << 0)
+#define JZ_LCD_RGBC_EVEN_BRG (0x4 << 0)
+#define JZ_LCD_RGBC_EVEN_BGR (0x5 << 0)
+
#define JZ_LCD_OSDC_OSDEN BIT(0)
#define JZ_LCD_OSDC_F0EN BIT(3)
#define JZ_LCD_OSDC_F1EN BIT(4)
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index a31a840ce634..f64e06e1067d 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -400,7 +400,7 @@ static void kmb_irq_reset(struct drm_device *drm)
DEFINE_DRM_GEM_CMA_FOPS(fops);
-static struct drm_driver kmb_driver = {
+static const struct drm_driver kmb_driver = {
.driver_features = DRIVER_GEM |
DRIVER_MODESET | DRIVER_ATOMIC,
.irq_handler = kmb_isr,
@@ -556,7 +556,7 @@ MODULE_DEVICE_TABLE(of, kmb_of_match);
static int __maybe_unused kmb_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct kmb_drm_private *kmb = drm ? to_kmb(drm) : NULL;
+ struct kmb_drm_private *kmb = to_kmb(drm);
drm_kms_helper_poll_disable(drm);
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index 8448d1edb553..be8eea3830c1 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -114,6 +114,9 @@ static void kmb_plane_atomic_disable(struct drm_plane *plane,
kmb = to_kmb(plane->dev);
+ if (WARN_ON(plane_id >= KMB_MAX_PLANES))
+ return;
+
switch (plane_id) {
case LAYER_0:
kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL1_ENABLE;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index e9b6788d52cd..8b0de90156c6 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -635,7 +635,6 @@ static const struct drm_crtc_funcs mtk_crtc_funcs = {
.reset = mtk_drm_crtc_reset,
.atomic_duplicate_state = mtk_drm_crtc_duplicate_state,
.atomic_destroy_state = mtk_drm_crtc_destroy_state,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.enable_vblank = mtk_drm_crtc_enable_vblank,
.disable_vblank = mtk_drm_crtc_disable_vblank,
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 28a2ee1336ef..280ea0d5e840 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -260,7 +260,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
return -ENOMEM;
}
- drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages);
+ drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages);
mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 6ccd270789c6..4fd4de16cd32 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file mga_ioc32.c
*
* 32-bit ioctl compatibility routines for the MGA DRM.
@@ -159,13 +159,13 @@ static struct {
};
/**
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/dri/card<n>.
+ * mga_compat_ioctl - Called whenever a 32-bit process running under
+ * a 64-bit kernel performs an ioctl on /dev/dri/card<n>.
*
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
+ * @filp: file pointer.
+ * @cmd: command.
+ * @arg: user argument.
+ * Return: zero on success or negative number on failure.
*/
long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index a977c9f49719..4e4c105f9a50 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -47,10 +47,11 @@ static const struct drm_driver mgag200_driver = {
static bool mgag200_has_sgram(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 option;
int ret;
- ret = pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
+ ret = pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
if (drm_WARN(dev, ret, "failed to read PCI config dword: %d\n", ret))
return false;
@@ -60,6 +61,7 @@ static bool mgag200_has_sgram(struct mga_device *mdev)
static int mgag200_regs_init(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 option, option2;
u8 crtcext3;
@@ -99,13 +101,13 @@ static int mgag200_regs_init(struct mga_device *mdev)
}
if (option)
- pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
if (option2)
- pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION2, option2);
/* BAR 1 contains registers */
- mdev->rmmio_base = pci_resource_start(dev->pdev, 1);
- mdev->rmmio_size = pci_resource_len(dev->pdev, 1);
+ mdev->rmmio_base = pci_resource_start(pdev, 1);
+ mdev->rmmio_size = pci_resource_len(pdev, 1);
if (!devm_request_mem_region(dev->dev, mdev->rmmio_base,
mdev->rmmio_size, "mgadrmfb_mmio")) {
@@ -113,7 +115,7 @@ static int mgag200_regs_init(struct mga_device *mdev)
return -ENOMEM;
}
- mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
+ mdev->rmmio = pcim_iomap(pdev, 1, 0);
if (mdev->rmmio == NULL)
return -ENOMEM;
@@ -218,6 +220,7 @@ static void mgag200_g200_interpret_bios(struct mga_device *mdev,
static void mgag200_g200_init_refclk(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned char __iomem *rom;
unsigned char *bios;
size_t size;
@@ -226,7 +229,7 @@ static void mgag200_g200_init_refclk(struct mga_device *mdev)
mdev->model.g200.pclk_max = 230000;
mdev->model.g200.ref_clk = 27050;
- rom = pci_map_rom(dev->pdev, &size);
+ rom = pci_map_rom(pdev, &size);
if (!rom)
return;
@@ -244,7 +247,7 @@ static void mgag200_g200_init_refclk(struct mga_device *mdev)
vfree(bios);
out:
- pci_unmap_rom(dev->pdev, rom);
+ pci_unmap_rom(pdev, rom);
}
static void mgag200_g200se_init_unique_id(struct mga_device *mdev)
@@ -301,7 +304,6 @@ mgag200_device_create(struct pci_dev *pdev, unsigned long flags)
return mdev;
dev = &mdev->base;
- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
ret = mgag200_device_init(mdev, flags);
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 09731e614e46..ac8e34eef513 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -126,7 +126,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
i2c->clock = clock;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "mga i2c");
diff --git a/drivers/gpu/drm/mgag200/mgag200_mm.c b/drivers/gpu/drm/mgag200/mgag200_mm.c
index 641f1aa992be..b667371b69a4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mm.c
@@ -78,11 +78,12 @@ static size_t mgag200_probe_vram(struct mga_device *mdev, void __iomem *mem,
static void mgag200_mm_release(struct drm_device *dev, void *ptr)
{
struct mga_device *mdev = to_mga_device(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
mdev->vram_fb_available = 0;
iounmap(mdev->vram);
- arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
+ arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
arch_phys_wc_del(mdev->fb_mtrr);
mdev->fb_mtrr = 0;
}
@@ -90,6 +91,7 @@ static void mgag200_mm_release(struct drm_device *dev, void *ptr)
int mgag200_mm_init(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 misc;
resource_size_t start, len;
int ret;
@@ -102,8 +104,8 @@ int mgag200_mm_init(struct mga_device *mdev)
WREG8(MGA_MISC_OUT, misc);
/* BAR 0 is VRAM */
- start = pci_resource_start(dev->pdev, 0);
- len = pci_resource_len(dev->pdev, 0);
+ start = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
if (!devm_request_mem_region(dev->dev, start, len, "mgadrmfb_vram")) {
drm_err(dev, "can't reserve VRAM\n");
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 7e82c41a85f1..bdc989183c64 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -534,8 +534,10 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
if (!gpu->aspace) {
dev_err(dev->dev, "No memory protection without MMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
return gpu;
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 93da6683a866..4534633fe7cd 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -564,8 +564,10 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index c0be3a0f36b2..82bebb40234d 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -692,8 +692,10 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 87c8b033ad1a..12e75ba360f9 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -18,6 +18,10 @@ bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
+bool allow_vram_carveout = false;
+MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
+module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
+
static const struct adreno_info gpulist[] = {
{
.rev = ADRENO_REV(2, 0, 0, 0),
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 6cf9975e951e..f09175698827 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -191,8 +191,6 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- struct io_pgtable_domain_attr pgtbl_cfg;
struct iommu_domain *iommu;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
@@ -202,13 +200,18 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
if (!iommu)
return NULL;
- /*
- * This allows GPU to set the bus attributes required to use system
- * cache on behalf of the iommu page table walker.
- */
- if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
- pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+
+ if (adreno_is_a6xx(adreno_gpu)) {
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct io_pgtable_domain_attr pgtbl_cfg;
+ /*
+ * This allows GPU to set the bus attributes required to use system
+ * cache on behalf of the iommu page table walker.
+ */
+ if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
+ pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+ iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+ }
}
mmu = msm_iommu_new(&pdev->dev, iommu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index c3775f79525a..b3d9a333591b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -18,6 +18,7 @@
#include "adreno_pm4.xml.h"
extern bool snapshot_debugbus;
+extern bool allow_vram_carveout;
enum {
ADRENO_FW_PM4 = 0,
@@ -211,6 +212,11 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
return gpu->revn == 540;
}
+static inline bool adreno_is_a6xx(struct adreno_gpu *gpu)
+{
+ return ((gpu->revn < 700 && gpu->revn > 599));
+}
+
static inline int adreno_is_a618(struct adreno_gpu *gpu)
{
return gpu->revn == 618;
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index e3462f5d96d7..36b39c381b3f 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1420,16 +1420,14 @@ void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
{
u8 *dpcd = ctrl->panel->dpcd;
- u32 edid_quirks = 0;
- edid_quirks = drm_dp_get_edid_quirks(ctrl->panel->edid);
/*
* For better interop experience, used a fixed NVID=0x8000
* whenever connected to a VGA dongle downstream.
*/
if (drm_dp_is_branch(dpcd))
- return (drm_dp_has_quirk(&ctrl->panel->desc, edid_quirks,
- DP_DPCD_QUIRK_CONSTANT_N));
+ return (drm_dp_has_quirk(&ctrl->panel->desc,
+ DP_DPCD_QUIRK_CONSTANT_N));
return false;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 6e971d552911..3bc7ed21de28 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -693,6 +693,13 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
return 0;
}
+ if (state == ST_CONNECT_PENDING) {
+ /* wait until ST_CONNECTED */
+ dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
if (ret == -ECONNRESET) { /* cable unplugged */
dp->core_initialized = false;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 97dca3e378b7..d1780bcac8cc 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -167,12 +167,18 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
rc = dp_panel_read_dpcd(dp_panel);
+ if (rc) {
+ DRM_ERROR("read dpcd failed %d\n", rc);
+ return rc;
+ }
+
bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
- if (rc || !is_link_rate_valid(bw_code) ||
+ if (!is_link_rate_valid(bw_code) ||
!is_lane_count_valid(dp_panel->link_info.num_lanes) ||
(bw_code > dp_panel->max_bw_code)) {
- DRM_ERROR("read dpcd failed %d\n", rc);
- return rc;
+ DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate,
+ dp_panel->link_info.num_lanes);
+ return -EINVAL;
}
if (dp_panel->dfp_present) {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 535a0263ceeb..108c405e03dd 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -457,14 +457,14 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
drm_mode_config_init(ddev);
- /* Bind all our sub-components: */
- ret = component_bind_all(dev, ddev);
+ ret = msm_init_vram(ddev);
if (ret)
goto err_destroy_mdss;
- ret = msm_init_vram(ddev);
+ /* Bind all our sub-components: */
+ ret = component_bind_all(dev, ddev);
if (ret)
- goto err_msm_uninit;
+ goto err_destroy_mdss;
dma_set_max_seg_size(dev, UINT_MAX);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 9a7c49bc394f..a588b0388d27 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -96,6 +96,8 @@ static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(!msm_gem_is_locked(obj));
+
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
@@ -988,6 +990,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
if (msm_obj->pages)
kvfree(msm_obj->pages);
+ put_iova_vmas(obj);
+
/* dma_buf_detach() grabs resv lock, so we need to unlock
* prior to drm_prime_gem_destroy
*/
@@ -997,11 +1001,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
} else {
msm_gem_vunmap(obj);
put_pages(obj);
+ put_iova_vmas(obj);
msm_gem_unlock(obj);
}
- put_iova_vmas(obj);
-
drm_gem_object_release(obj);
kfree(msm_obj);
@@ -1115,6 +1118,8 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
struct msm_gem_vma *vma;
struct page **pages;
+ drm_gem_private_object_init(dev, obj, size);
+
msm_gem_lock(obj);
vma = add_vma(obj, NULL);
@@ -1126,9 +1131,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
to_msm_bo(obj)->vram_node = &vma->node;
- drm_gem_private_object_init(dev, obj, size);
-
+ msm_gem_lock(obj);
pages = get_pages(obj);
+ msm_gem_unlock(obj);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail;
@@ -1211,7 +1216,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
goto fail;
}
- ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
+ ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
if (ret) {
msm_gem_unlock(obj);
goto fail;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 9d4a2d97507e..1d3542d6006b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -200,16 +200,17 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
uint32_t cfg1 = nvif_rd32(device, NV04_PFB_CFG1);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
sim_data.pclk_khz = VClk;
sim_data.mclk_khz = MClk;
sim_data.nvclk_khz = NVClk;
sim_data.bpp = bpp;
sim_data.two_heads = nv_two_heads(dev);
- if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
- (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
+ if ((pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
+ (pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
uint32_t type;
- int domain = pci_domain_nr(dev->pdev->bus);
+ int domain = pci_domain_nr(pdev->bus);
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 1),
0x7c, &type);
@@ -251,11 +252,12 @@ void
nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN)
nv04_update_arb(dev, vclk, bpp, burst, lwm);
- else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
- (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
+ else if ((pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
+ (pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
*burst = 128;
*lwm = 0x0480;
} else
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 42687ea2a4ca..ce3d8c6ef000 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -488,12 +488,13 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
#ifdef __powerpc__
struct drm_device *dev = encoder->dev;
struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
/* BIOS scripts usually take care of the backlight, thanks
* Apple for your consistency.
*/
- if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
- dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
+ if (pdev->device == 0x0174 || pdev->device == 0x0179 ||
+ pdev->device == 0x0189 || pdev->device == 0x0329) {
if (mode == DRM_MODE_DPMS_ON) {
nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 5ace5e906949..f0a24126641a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -130,7 +130,7 @@ static inline bool
nv_two_heads(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- const int impl = dev->pdev->device & 0x0ff0;
+ const int impl = to_pci_dev(dev->dev)->device & 0x0ff0;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 &&
impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
@@ -142,14 +142,14 @@ nv_two_heads(struct drm_device *dev)
static inline bool
nv_gf4_disp_arch(struct drm_device *dev)
{
- return nv_two_heads(dev) && (dev->pdev->device & 0x0ff0) != 0x0110;
+ return nv_two_heads(dev) && (to_pci_dev(dev->dev)->device & 0x0ff0) != 0x0110;
}
static inline bool
nv_two_reg_pll(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- const int impl = dev->pdev->device & 0x0ff0;
+ const int impl = to_pci_dev(dev->dev)->device & 0x0ff0;
if (impl == 0x0310 || impl == 0x0340 || drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE)
return true;
@@ -160,9 +160,11 @@ static inline bool
nv_match_device(struct drm_device *dev, unsigned device,
unsigned sub_vendor, unsigned sub_device)
{
- return dev->pdev->device == device &&
- dev->pdev->subsystem_vendor == sub_vendor &&
- dev->pdev->subsystem_device == sub_device;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ return pdev->device == device &&
+ pdev->subsystem_vendor == sub_vendor &&
+ pdev->subsystem_device == sub_device;
}
#include <subdev/bios/init.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index b674d68ef28a..f7d35657aa64 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -214,14 +214,15 @@ nouveau_hw_pllvals_to_clk(struct nvkm_pll_vals *pv)
int
nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct nvkm_pll_vals pllvals;
int ret;
int domain;
- domain = pci_domain_nr(dev->pdev->bus);
+ domain = pci_domain_nr(pdev->bus);
if (plltype == PLL_MEMORY &&
- (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
+ (pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
uint32_t mpllP;
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 3),
0x6c, &mpllP);
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
return 400000 / mpllP;
} else
if (plltype == PLL_MEMORY &&
- (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
+ (pdev->device & 0xff0) == CHIPSET_NFORCE2) {
uint32_t clock;
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 5),
@@ -309,6 +310,7 @@ void
nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
uint8_t misc, gr4, gr5, gr6, seq2, seq4;
bool graphicsmode;
unsigned plane;
@@ -327,7 +329,7 @@ nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
NV_INFO(drm, "%sing VGA fonts\n", save ? "Sav" : "Restor");
/* map first 64KiB of VRAM, holds VGA fonts etc */
- iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
+ iovram = ioremap(pci_resource_start(pdev, 1), 65536);
if (!iovram) {
NV_ERROR(drm, "Failed to map VRAM, "
"cannot save/restore VGA fonts.\n");
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 6fdddb266fb1..4488e1c061b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -37,6 +37,7 @@ nouveau-y += dispnv50/wimmc37b.o
nouveau-y += dispnv50/wndw.o
nouveau-y += dispnv50/wndwc37e.o
nouveau-y += dispnv50/wndwc57e.o
+nouveau-y += dispnv50/wndwc67e.o
nouveau-y += dispnv50/base.o
nouveau-y += dispnv50/base507c.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
index 27ea3f34706d..abefc2343443 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
int version;
int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
} cores[] = {
+ { GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
{ GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index e6f16a7750f0..1a1d806e0b01 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -36,7 +36,7 @@ core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
struct nvif_push *push = core->chan.push;
int ret;
- if ((ret = PUSH_WAIT(push, 5)))
+ if ((ret = PUSH_WAIT(push, (ntfy ? 2 : 0) + 3)))
return ret;
if (ntfy) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 9035d3ab062c..42f877f2ced2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -54,7 +54,7 @@ corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
struct nvif_push *push = core->chan.push;
int ret;
- if ((ret = PUSH_WAIT(push, 9)))
+ if ((ret = PUSH_WAIT(push, (ntfy ? 2 * 2 : 0) + 5)))
return ret;
if (ntfy) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
index 121c24a18f11..31d8b2e4791d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
int version;
int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
} curses[] = {
+ { GA102_DISP_CURSOR, 0, cursc37a_new },
{ TU102_DISP_CURSOR, 0, cursc37a_new },
{ GV100_DISP_CURSOR, 0, cursc37a_new },
{ GK104_DISP_CURSOR, 0, curs907a_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 33fff388dd83..c9e1575a06a2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -220,9 +220,13 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
return 0;
}
+MODULE_PARM_DESC(kms_vram_pushbuf, "Place EVO/NVD push buffers in VRAM (default: auto)");
+static int nv50_dmac_vram_pushbuf = -1;
+module_param_named(kms_vram_pushbuf, nv50_dmac_vram_pushbuf, int, 0400);
+
int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
- const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
+ const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
struct nv50_dmac *dmac)
{
struct nouveau_cli *cli = (void *)device->object.client;
@@ -241,7 +245,8 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
*
* This appears to match NVIDIA's behaviour on Pascal.
*/
- if (device->info.family == NV_DEVICE_INFO_V0_PASCAL)
+ if ((nv50_dmac_vram_pushbuf > 0) ||
+ (nv50_dmac_vram_pushbuf < 0 && device->info.family == NV_DEVICE_INFO_V0_PASCAL))
type |= NVIF_MEM_VRAM;
ret = nvif_mem_ctor_map(&cli->mmu, "kmsChanPush", type, 0x1000,
@@ -271,7 +276,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (ret)
return ret;
- if (!syncbuf)
+ if (syncbuf < 0)
return 0;
ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
@@ -305,6 +310,14 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
* Output path helpers
*****************************************************************************/
static void
+nv50_outp_dump_caps(struct nouveau_drm *drm,
+ struct nouveau_encoder *outp)
+{
+ NV_DEBUG(drm, "%s caps: dp_interlace=%d\n",
+ outp->base.base.name, outp->caps.dp_interlace);
+}
+
+static void
nv50_outp_release(struct nouveau_encoder *nv_encoder)
{
struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
@@ -419,8 +432,7 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
}
struct nouveau_connector *
-nv50_outp_get_new_connector(struct nouveau_encoder *outp,
- struct drm_atomic_state *state)
+nv50_outp_get_new_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
{
struct drm_connector *connector;
struct drm_connector_state *connector_state;
@@ -436,8 +448,7 @@ nv50_outp_get_new_connector(struct nouveau_encoder *outp,
}
struct nouveau_connector *
-nv50_outp_get_old_connector(struct nouveau_encoder *outp,
- struct drm_atomic_state *state)
+nv50_outp_get_old_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
{
struct drm_connector *connector;
struct drm_connector_state *connector_state;
@@ -452,27 +463,44 @@ nv50_outp_get_old_connector(struct nouveau_encoder *outp,
return NULL;
}
+static struct nouveau_crtc *
+nv50_outp_get_new_crtc(const struct drm_atomic_state *state, const struct nouveau_encoder *outp)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ const u32 mask = drm_encoder_mask(&outp->base.base);
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->encoder_mask & mask)
+ return nouveau_crtc(crtc);
+ }
+
+ return NULL;
+}
+
/******************************************************************************
* DAC
*****************************************************************************/
static void
-nv50_dac_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_dac_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
const u32 ctrl = NVDEF(NV507D, DAC_SET_CONTROL, OWNER, NONE);
- if (nv_encoder->crtc)
- core->func->dac->ctrl(core, nv_encoder->or, ctrl, NULL);
+
+ core->func->dac->ctrl(core, nv_encoder->or, ctrl, NULL);
nv_encoder->crtc = NULL;
nv50_outp_release(nv_encoder);
}
static void
-nv50_dac_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_dac_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
+ struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
struct nv50_core *core = nv50_disp(encoder->dev)->core;
u32 ctrl = 0;
@@ -493,7 +521,7 @@ nv50_dac_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
core->func->dac->ctrl(core, nv_encoder->or, ctrl, asyh);
asyh->or.depth = 0;
- nv_encoder->crtc = encoder->crtc;
+ nv_encoder->crtc = &nv_crtc->base;
}
static enum drm_connector_status
@@ -526,8 +554,8 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
static const struct drm_encoder_helper_funcs
nv50_dac_help = {
.atomic_check = nv50_outp_atomic_check,
- .atomic_enable = nv50_dac_enable,
- .atomic_disable = nv50_dac_disable,
+ .atomic_enable = nv50_dac_atomic_enable,
+ .atomic_disable = nv50_dac_atomic_disable,
.detect = nv50_dac_detect
};
@@ -593,34 +621,27 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_encoder *encoder;
struct nouveau_encoder *nv_encoder;
- struct drm_connector *connector;
struct nouveau_crtc *nv_crtc;
- struct drm_connector_list_iter conn_iter;
int ret = 0;
*enabled = false;
+ mutex_lock(&drm->audio.lock);
+
drm_for_each_encoder(encoder, drm->dev) {
struct nouveau_connector *nv_connector = NULL;
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST)
+ continue; /* TODO */
+
nv_encoder = nouveau_encoder(encoder);
+ nv_connector = nouveau_connector(nv_encoder->audio.connector);
+ nv_crtc = nouveau_crtc(nv_encoder->crtc);
- drm_connector_list_iter_begin(drm_dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->state->best_encoder == encoder) {
- nv_connector = nouveau_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
- if (!nv_connector)
+ if (!nv_crtc || nv_encoder->or != port || nv_crtc->index != dev_id)
continue;
- nv_crtc = nouveau_crtc(encoder->crtc);
- if (!nv_crtc || nv_encoder->or != port ||
- nv_crtc->index != dev_id)
- continue;
- *enabled = nv_encoder->audio;
+ *enabled = nv_encoder->audio.enabled;
if (*enabled) {
ret = drm_eld_size(nv_connector->base.eld);
memcpy(buf, nv_connector->base.eld,
@@ -629,6 +650,8 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
break;
}
+ mutex_unlock(&drm->audio.lock);
+
return ret;
}
@@ -678,17 +701,22 @@ static const struct component_ops nv50_audio_component_bind_ops = {
static void
nv50_audio_component_init(struct nouveau_drm *drm)
{
- if (!component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
- drm->audio.component_registered = true;
+ if (component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
+ return;
+
+ drm->audio.component_registered = true;
+ mutex_init(&drm->audio.lock);
}
static void
nv50_audio_component_fini(struct nouveau_drm *drm)
{
- if (drm->audio.component_registered) {
- component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
- drm->audio.component_registered = false;
- }
+ if (!drm->audio.component_registered)
+ return;
+
+ component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
+ drm->audio.component_registered = false;
+ mutex_destroy(&drm->audio.lock);
}
/******************************************************************************
@@ -711,24 +739,25 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
(0x0100 << nv_crtc->index),
};
- if (!nv_encoder->audio)
- return;
-
- nv_encoder->audio = false;
- nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
+ mutex_lock(&drm->audio.lock);
+ if (nv_encoder->audio.enabled) {
+ nv_encoder->audio.enabled = false;
+ nv_encoder->audio.connector = NULL;
+ nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
+ }
+ mutex_unlock(&drm->audio.lock);
nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
nv_crtc->index);
}
static void
-nv50_audio_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
+nv50_audio_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
+ struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
struct drm_display_mode *mode)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct __packed {
struct {
@@ -744,15 +773,19 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
(0x0100 << nv_crtc->index),
};
- nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
if (!drm_detect_monitor_audio(nv_connector->edid))
return;
+ mutex_lock(&drm->audio.lock);
+
memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
nvif_mthd(&disp->disp->object, 0, &args,
sizeof(args.base) + drm_eld_size(args.data));
- nv_encoder->audio = true;
+ nv_encoder->audio.enabled = true;
+ nv_encoder->audio.connector = &nv_connector->base;
+
+ mutex_unlock(&drm->audio.lock);
nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
nv_crtc->index);
@@ -781,12 +814,12 @@ nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
}
static void
-nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
+nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
+ struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
struct drm_display_mode *mode)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
struct nv50_disp_mthd_v1 base;
@@ -801,7 +834,6 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
.pwr.state = 1,
.pwr.rekey = 56, /* binary driver, and tegra, constant */
};
- struct nouveau_connector *nv_connector;
struct drm_hdmi_info *hdmi;
u32 max_ac_packet;
union hdmi_infoframe avi_frame;
@@ -811,7 +843,6 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
int ret;
int size;
- nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
if (!drm_detect_hdmi_monitor(nv_connector->edid))
return;
@@ -857,7 +888,7 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
+ args.pwr.vendor_infoframe_length;
nvif_mthd(&disp->disp->object, 0, &args, size);
- nv50_audio_enable(encoder, state, mode);
+ nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
/* If SCDC is supported by the downstream monitor, update
* divider / scrambling settings to what we programmed above.
@@ -898,6 +929,7 @@ struct nv50_mstc {
struct nv50_msto {
struct drm_encoder encoder;
+ /* head is statically assigned on msto creation */
struct nv50_head *head;
struct nv50_mstc *mstc;
bool disabled;
@@ -1056,11 +1088,12 @@ nv50_dp_bpc_to_depth(unsigned int bpc)
}
static void
-nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
- struct nv50_head *head = nv50_head(encoder->crtc);
- struct nv50_head_atom *armh = nv50_head_atom(head->base.base.state);
struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_head *head = msto->head;
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &head->base.base));
struct nv50_mstc *mstc = NULL;
struct nv50_mstm *mstm = NULL;
struct drm_connector *connector;
@@ -1081,8 +1114,7 @@ nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
if (WARN_ON(!mstc))
return;
- r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, armh->dp.pbn,
- armh->dp.tu);
+ r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, asyh->dp.pbn, asyh->dp.tu);
if (!r)
DRM_DEBUG_KMS("Failed to allocate VCPI\n");
@@ -1094,15 +1126,15 @@ nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
else
proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B;
- mstm->outp->update(mstm->outp, head->base.index, armh, proto,
- nv50_dp_bpc_to_depth(armh->or.bpc));
+ mstm->outp->update(mstm->outp, head->base.index, asyh, proto,
+ nv50_dp_bpc_to_depth(asyh->or.bpc));
msto->mstc = mstc;
mstm->modified = true;
}
static void
-nv50_msto_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
@@ -1119,8 +1151,8 @@ nv50_msto_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
static const struct drm_encoder_helper_funcs
nv50_msto_help = {
- .atomic_disable = nv50_msto_disable,
- .atomic_enable = nv50_msto_enable,
+ .atomic_disable = nv50_msto_atomic_disable,
+ .atomic_enable = nv50_msto_atomic_enable,
.atomic_check = nv50_msto_atomic_check,
};
@@ -1616,43 +1648,38 @@ nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
}
static void
-nv50_sor_disable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- struct nouveau_connector *nv_connector =
- nv50_outp_get_old_connector(nv_encoder, state);
-
- nv_encoder->crtc = NULL;
-
- if (nv_crtc) {
- struct drm_dp_aux *aux = &nv_connector->aux;
- u8 pwr;
+ struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder);
+ struct drm_dp_aux *aux = &nv_connector->aux;
+ u8 pwr;
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- int ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
+ int ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
- if (ret == 0) {
- pwr &= ~DP_SET_POWER_MASK;
- pwr |= DP_SET_POWER_D3;
- drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
- }
+ if (ret == 0) {
+ pwr &= ~DP_SET_POWER_MASK;
+ pwr |= DP_SET_POWER_D3;
+ drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
}
-
- nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
- nv50_audio_disable(encoder, nv_crtc);
- nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
- nv50_outp_release(nv_encoder);
}
+
+ nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
+ nv50_audio_disable(encoder, nv_crtc);
+ nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
+ nv50_outp_release(nv_encoder);
+ nv_encoder->crtc = NULL;
}
static void
-nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
+ struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
struct {
struct nv50_disp_mthd_v1 base;
@@ -1672,8 +1699,8 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM;
u8 depth = NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT;
- nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
- nv_encoder->crtc = encoder->crtc;
+ nv_connector = nv50_outp_get_new_connector(state, nv_encoder);
+ nv_encoder->crtc = &nv_crtc->base;
if ((disp->disp->object.oclass == GT214_DISP ||
disp->disp->object.oclass >= GF110_DISP) &&
@@ -1699,7 +1726,7 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B;
}
- nv50_hdmi_enable(&nv_encoder->base.base, state, mode);
+ nv50_hdmi_enable(&nv_encoder->base.base, nv_crtc, nv_connector, state, mode);
break;
case DCB_OUTPUT_LVDS:
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM;
@@ -1740,7 +1767,7 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
else
proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;
- nv50_audio_enable(encoder, state, mode);
+ nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
break;
default:
BUG();
@@ -1753,8 +1780,8 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
static const struct drm_encoder_helper_funcs
nv50_sor_help = {
.atomic_check = nv50_outp_atomic_check,
- .atomic_enable = nv50_sor_enable,
- .atomic_disable = nv50_sor_disable,
+ .atomic_enable = nv50_sor_atomic_enable,
+ .atomic_disable = nv50_sor_atomic_disable,
};
static void
@@ -1821,6 +1848,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_connector_attach_encoder(connector, encoder);
disp->core->func->sor->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+ nv50_outp_dump_caps(drm, nv_encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
struct nvkm_i2c_aux *aux =
@@ -1875,23 +1903,24 @@ nv50_pior_atomic_check(struct drm_encoder *encoder,
}
static void
-nv50_pior_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_pior_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
const u32 ctrl = NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, NONE);
- if (nv_encoder->crtc)
- core->func->pior->ctrl(core, nv_encoder->or, ctrl, NULL);
+
+ core->func->pior->ctrl(core, nv_encoder->or, ctrl, NULL);
nv_encoder->crtc = NULL;
nv50_outp_release(nv_encoder);
}
static void
-nv50_pior_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_pior_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
+ struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
struct nv50_core *core = nv50_disp(encoder->dev)->core;
u32 ctrl = 0;
@@ -1929,8 +1958,8 @@ nv50_pior_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
static const struct drm_encoder_helper_funcs
nv50_pior_help = {
.atomic_check = nv50_pior_atomic_check,
- .atomic_enable = nv50_pior_enable,
- .atomic_disable = nv50_pior_disable,
+ .atomic_enable = nv50_pior_atomic_enable,
+ .atomic_disable = nv50_pior_atomic_disable,
};
static void
@@ -1991,6 +2020,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_connector_attach_encoder(connector, encoder);
disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+ nv50_outp_dump_caps(drm, nv_encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 92bddc083617..38dec11e7dda 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -95,7 +95,7 @@ struct nv50_outp_atom {
int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
const s32 *oclass, u8 head, void *data, u32 size,
- u64 syncbuf, struct nv50_dmac *dmac);
+ s64 syncbuf, struct nv50_dmac *dmac);
void nv50_dmac_destroy(struct nv50_dmac *);
/*
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 537c1ef2e464..ec361d17e900 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -503,7 +503,6 @@ nv50_head_destroy(struct drm_crtc *crtc)
static const struct drm_crtc_funcs
nv50_head_func = {
.reset = nv50_head_reset,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = nv50_head_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -518,7 +517,6 @@ nv50_head_func = {
static const struct drm_crtc_funcs
nvd9_head_func = {
.reset = nv50_head_reset,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = nv50_head_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index 8f860e9c5224..85648d790743 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -322,7 +322,7 @@ head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
const int i = head->base.index;
int ret;
- if ((ret = PUSH_WAIT(push, 14)))
+ if ((ret = PUSH_WAIT(push, 13)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_OVERSCAN_COLOR(i),
@@ -353,14 +353,7 @@ head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
PUSH_MTHD(push, NV907D, HEAD_SET_DEFAULT_BASE_COLOR(i),
NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, RED, 0) |
NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, GREEN, 0) |
- NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0),
-
- HEAD_SET_CRC_CONTROL(i),
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, TIMESTAMP_MODE, FALSE) |
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, NONE) |
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, SECONDARY_OUTPUT, NONE));
+ NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0));
PUSH_MTHD(push, NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
NVVAL(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000) |
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
index a1ac153d5e98..566fbddfc8d7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimm.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
@@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
int version;
int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
} wimms[] = {
+ { GA102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ TU102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index 685b70871324..b390029c69ec 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -76,7 +76,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
int ret;
ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
- &oclass, 0, &args, sizeof(args), 0,
+ &oclass, 0, &args, sizeof(args), -1,
&wndw->wimm);
if (ret) {
NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 0356474ad6f6..ce451242f79e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -784,6 +784,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
int (*new)(struct nouveau_drm *, enum drm_plane_type,
int, s32, struct nv50_wndw **);
} wndws[] = {
+ { GA102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc67e_new },
{ TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
{}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 3278e2880034..f4e0c5080034 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -129,6 +129,14 @@ int wndwc37e_update(struct nv50_wndw *, u32 *);
int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
+bool wndwc57e_ilut(struct nv50_wndw *, struct nv50_wndw_atom *, int);
+int wndwc57e_ilut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc57e_ilut_clr(struct nv50_wndw *);
+int wndwc57e_csc_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc57e_csc_clr(struct nv50_wndw *);
+
+int wndwc67e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+ struct nv50_wndw **);
int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
struct nv50_wndw **);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index 429be0bb0222..abdd3bb658b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -80,7 +80,7 @@ wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
return 0;
}
-static int
+int
wndwc57e_csc_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -98,7 +98,7 @@ wndwc57e_csc_clr(struct nv50_wndw *wndw)
return 0;
}
-static int
+int
wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -111,7 +111,7 @@ wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
return 0;
}
-static int
+int
wndwc57e_ilut_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -124,7 +124,7 @@ wndwc57e_ilut_clr(struct nv50_wndw *wndw)
return 0;
}
-static int
+int
wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
@@ -179,7 +179,7 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-static bool
+bool
wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
if (size = size ? size : 1024, size != 256 && size != 1024)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
new file mode 100644
index 000000000000..7a370fa1df20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57e.h>
+
+static int
+wndwc67e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
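+ /* Wait for enough push-buffer space for the methods queued below. */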
+ if ((ret = PUSH_WAIT(push, 17)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57E, SET_PRESENT_CONTROL,
+ NVVAL(NVC57E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+ NVVAL(NVC57E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVDEF(NVC57E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE,
+ NVVAL(NVC57E, SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NVC57E, SET_SIZE, HEIGHT, asyw->image.h),
+
+ SET_STORAGE,
+ NVVAL(NVC57E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
+
+ SET_PARAMS,
+ NVVAL(NVC57E, SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NVC57E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+ NVDEF(NVC57E, SET_PARAMS, SWAP_UV, DISABLE) |
+ NVDEF(NVC57E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
+
+ SET_PLANAR_STORAGE(0),
+ NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+ PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
+ PUSH_MTHD(push, NVC57E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NVC57E, SET_POINT_IN(0),
+ NVVAL(NVC57E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+ NVVAL(NVC57E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE_IN,
+ NVVAL(NVC57E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+ NVVAL(NVC57E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE_OUT,
+ NVVAL(NVC57E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+ NVVAL(NVC57E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+ return 0;
+}
+
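+/* GA102 window channels mostly reuse the C37E/C57E implementations; only image_set is new here. */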
+static const struct nv50_wndw_func
+wndwc67e = {
+ .acquire = wndwc37e_acquire,
+ .release = wndwc37e_release,
+ .sema_set = wndwc37e_sema_set,
+ .sema_clr = wndwc37e_sema_clr,
+ .ntfy_set = wndwc37e_ntfy_set,
+ .ntfy_clr = wndwc37e_ntfy_clr,
+ .ntfy_reset = corec37d_ntfy_init,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .ilut = wndwc57e_ilut,
+ .ilut_identity = true,
+ .ilut_size = 1024,
+ .xlut_set = wndwc57e_ilut_set,
+ .xlut_clr = wndwc57e_ilut_clr,
+ .csc = base907c_csc,
+ .csc_set = wndwc57e_csc_set,
+ .csc_clr = wndwc57e_csc_clr,
+ .image_set = wndwc67e_image_set,
+ .image_clr = wndwc37e_image_clr,
+ .blend_set = wndwc37e_blend_set,
+ .update = wndwc37e_update,
+};
+
+int
+wndwc67e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+ s32 oclass, struct nv50_wndw **pwndw)
+{
+ return wndwc37e_new_(&wndwc67e, drm, type, index, oclass, BIT(index >> 1), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index cd9a2e687bb6..57d4f457a7d4 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -33,6 +33,7 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_PASCAL 0x0a
#define NV_DEVICE_INFO_V0_VOLTA 0x0b
#define NV_DEVICE_INFO_V0_TURING 0x0c
+#define NV_DEVICE_INFO_V0_AMPERE 0x0d
__u8 family;
__u8 pad06[2];
__u64 ram_size;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 2c79beb41126..ba2c28ea43d2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -88,6 +88,7 @@
#define GP102_DISP /* cl5070.h */ 0x00009870
#define GV100_DISP /* cl5070.h */ 0x0000c370
#define TU102_DISP /* cl5070.h */ 0x0000c570
+#define GA102_DISP /* cl5070.h */ 0x0000c670
#define GV100_DISP_CAPS 0x0000c373
@@ -103,6 +104,7 @@
#define GK104_DISP_CURSOR /* cl507a.h */ 0x0000917a
#define GV100_DISP_CURSOR /* cl507a.h */ 0x0000c37a
#define TU102_DISP_CURSOR /* cl507a.h */ 0x0000c57a
+#define GA102_DISP_CURSOR /* cl507a.h */ 0x0000c67a
#define NV50_DISP_OVERLAY /* cl507b.h */ 0x0000507b
#define G82_DISP_OVERLAY /* cl507b.h */ 0x0000827b
@@ -112,6 +114,7 @@
#define GV100_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c37b
#define TU102_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c57b
+#define GA102_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c67b
#define NV50_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000507c
#define G82_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000827c
@@ -135,6 +138,7 @@
#define GP102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
#define GV100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c37d
#define TU102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c57d
+#define GA102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c67d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
@@ -145,6 +149,7 @@
#define GV100_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c37e
#define TU102_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c57e
+#define GA102_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c67e
#define NV50_TESLA 0x00005097
#define G82_TESLA 0x00008297
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 5c007ce62fc3..c920939a1467 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -120,6 +120,7 @@ struct nvkm_device {
GP100 = 0x130,
GV100 = 0x140,
TU100 = 0x160,
+ GA100 = 0x170,
} card_type;
u32 chipset;
u8 chiprev;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index 5a96c942d912..0f6fa6631a19 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -37,4 +37,5 @@ int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int tu102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int ga102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
index f5f59261ea81..d1beaad0c82b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
@@ -14,6 +14,7 @@ enum dcb_connector_type {
DCB_CONNECTOR_LVDS_SPWG = 0x41,
DCB_CONNECTOR_DP = 0x46,
DCB_CONNECTOR_eDP = 0x47,
+ DCB_CONNECTOR_mDP = 0x48,
DCB_CONNECTOR_HDMI_0 = 0x60,
DCB_CONNECTOR_HDMI_1 = 0x61,
DCB_CONNECTOR_HDMI_C = 0x63,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 1a39e52e09e3..50cc7c05eac4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -32,4 +32,5 @@ int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int tu102_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int ga100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 34b56b10218a..2ecd52aec1d1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -86,6 +86,8 @@ int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gv100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int ga100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int ga102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
index eaacf8d80527..cdcce5ece6ff 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
@@ -37,4 +37,5 @@ int nv50_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
int g94_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
int gf119_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
int gk104_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int ga102_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index 81b977319640..640f649ce497 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -92,6 +92,7 @@ int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gk110_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gm200_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
static inline int
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 6641fe4c252c..e45ca4583967 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -32,4 +32,5 @@ int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int tu102_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int ga100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 9a5be6f32424..f08b31d84d4d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -181,6 +181,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
struct nvif_device *device = &drm->client.device;
struct nvkm_gr *gr = nvxx_gr(device);
struct drm_nouveau_getparam *getparam = data;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
switch (getparam->param) {
case NOUVEAU_GETPARAM_CHIPSET_ID:
@@ -188,13 +189,13 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
- getparam->value = dev->pdev->vendor;
+ getparam->value = pdev->vendor;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_PCI_DEVICE:
if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
- getparam->value = dev->pdev->device;
+ getparam->value = pdev->device;
else
getparam->value = 0;
break;
@@ -205,7 +206,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
case NV_DEVICE_INFO_V0_IGP :
- if (!pci_is_pcie(dev->pdev))
+ if (!pci_is_pcie(pdev))
getparam->value = 1;
else
getparam->value = 2;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 69a84d0197d0..7c15f6448428 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -377,7 +377,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
return NULL;
}
- handle = ACPI_HANDLE(&dev->pdev->dev);
+ handle = ACPI_HANDLE(dev->dev);
if (!handle)
return NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index c7a94c94dbf3..72f35a2babcb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -256,6 +256,7 @@ nouveau_backlight_init(struct drm_connector *connector)
case NV_DEVICE_INFO_V0_PASCAL:
case NV_DEVICE_INFO_V0_VOLTA:
case NV_DEVICE_INFO_V0_TURING:
+ case NV_DEVICE_INFO_V0_AMPERE: //XXX: not confirmed
ret = nv50_backlight_init(nv_encoder, &props, &ops);
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index d204ea8a5618..7cc683b8dc7a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -110,6 +110,9 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp
struct nvbios *bios = &drm->vbios;
uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & DCB_OUTPUT_C ? 1 : 0);
uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
+#ifdef __powerpc__
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+#endif
if (!bios->fp.xlated_entry || !sub || !scriptofs)
return -EINVAL;
@@ -123,8 +126,8 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp
#ifdef __powerpc__
/* Powerbook specific quirks */
if (script == LVDS_RESET &&
- (dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 ||
- dev->pdev->device == 0x0329))
+ (pdev->device == 0x0179 || pdev->device == 0x0189 ||
+ pdev->device == 0x0329))
nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
#endif
@@ -2080,11 +2083,13 @@ nouveau_bios_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
+ struct pci_dev *pdev;
int ret;
/* only relevant for PCI devices */
- if (!dev->pdev)
+ if (!dev_is_pci(dev->dev))
return 0;
+ pdev = to_pci_dev(dev->dev);
if (!NVInitVBIOS(dev))
return -ENODEV;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c85b1af06b7b..33dc886d1d6d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -473,10 +473,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- drm->gem.vram_available -= bo->mem.size;
+ drm->gem.vram_available -= bo->base.size;
break;
case TTM_PL_TT:
- drm->gem.gart_available -= bo->mem.size;
+ drm->gem.gart_available -= bo->base.size;
break;
default:
break;
@@ -504,10 +504,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
if (!nvbo->bo.pin_count) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- drm->gem.vram_available += bo->mem.size;
+ drm->gem.vram_available += bo->base.size;
break;
case TTM_PL_TT:
- drm->gem.gart_available += bo->mem.size;
+ drm->gem.gart_available += bo->base.size;
break;
default:
break;
@@ -774,7 +774,10 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
return ret;
}
- mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
+ if (drm_drv_uses_atomic_modeset(drm->dev))
+ mutex_lock(&cli->mutex);
+ else
+ mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
if (ret == 0) {
ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
@@ -910,7 +913,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
return 0;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
- *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
+ *new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
nvbo->mode, nvbo->zeta);
}
@@ -1232,9 +1235,8 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
return 0;
if (slave && ttm->sg) {
- /* make userspace faulting work */
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- ttm_dma->dma_address, ttm->num_pages);
+ drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address,
+ ttm->num_pages);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 5d191e58edf1..e48f1f7eb370 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -533,6 +533,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
if (ret) {
NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
nouveau_channel_del(pchan);
+ goto done;
}
ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 8b4b3688c7ae..61e6d7412505 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -411,6 +411,7 @@ static struct nouveau_encoder *
nouveau_connector_ddc_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
struct drm_encoder *encoder;
int ret;
@@ -438,11 +439,11 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
break;
if (switcheroo_ddc)
- vga_switcheroo_lock_ddc(dev->pdev);
+ vga_switcheroo_lock_ddc(pdev);
if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
found = nv_encoder;
if (switcheroo_ddc)
- vga_switcheroo_unlock_ddc(dev->pdev);
+ vga_switcheroo_unlock_ddc(pdev);
break;
}
@@ -490,6 +491,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct drm_device *dev = connector->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
if (nv_connector->detected_encoder == nv_encoder)
return;
@@ -511,8 +513,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->doublescan_allowed = true;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_KELVIN ||
(drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
- (dev->pdev->device & 0x0ff0) != 0x0100 &&
- (dev->pdev->device & 0x0ff0) != 0x0150))
+ (pdev->device & 0x0ff0) != 0x0100 &&
+ (pdev->device & 0x0ff0) != 0x0150))
/* HW is broken */
connector->interlace_allowed = false;
else
@@ -1210,6 +1212,7 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
case DCB_CONNECTOR_DMS59_DP0:
case DCB_CONNECTOR_DMS59_DP1:
case DCB_CONNECTOR_DP :
+ case DCB_CONNECTOR_mDP :
case DCB_CONNECTOR_USB_C : return DRM_MODE_CONNECTOR_DisplayPort;
case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
case DCB_CONNECTOR_HDMI_0 :
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index bceb48a2dfca..17831ee897ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -286,11 +286,11 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
bl_size = bw * bh * (1 << tile_mode) * gob_size;
- DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
+ DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n",
offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
- nvbo->bo.mem.size);
+ nvbo->bo.base.size);
- if (bl_size + offset > nvbo->bo.mem.size)
+ if (bl_size + offset > nvbo->bo.base.size)
return -ERANGE;
return 0;
@@ -363,7 +363,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
} else {
uint32_t size = mode_cmd->pitches[i] * height;
- if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
+ if (size + mode_cmd->offsets[i] > nvbo->bo.base.size)
return -ERANGE;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index d141a5f004af..1b2169e9c295 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -115,8 +115,8 @@ nouveau_platform_name(struct platform_device *platformdev)
static u64
nouveau_name(struct drm_device *dev)
{
- if (dev->pdev)
- return nouveau_pci_name(dev->pdev);
+ if (dev_is_pci(dev->dev))
+ return nouveau_pci_name(to_pci_dev(dev->dev));
else
return nouveau_platform_name(to_platform_device(dev->dev));
}
@@ -760,7 +760,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
if (ret)
goto fail_drm;
- drm_dev->pdev = pdev;
pci_set_drvdata(pdev, drm_dev);
ret = nouveau_drm_device_init(drm_dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9d04d1b36434..d28ee6844245 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -55,7 +55,6 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/drm_audio_component.h>
@@ -222,6 +221,7 @@ struct nouveau_drm {
struct {
struct drm_audio_component *component;
+ struct mutex lock;
bool component_registered;
} audio;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 21937f1c7dd9..1ffcc0a491fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -53,7 +53,12 @@ struct nouveau_encoder {
* actually programmed on the hw, not the proposed crtc */
struct drm_crtc *crtc;
u32 ctrl;
- bool audio;
+
+ /* Protected by nouveau_drm.audio.lock */
+ struct {
+ bool enabled;
+ struct drm_connector *connector;
+ } audio;
struct drm_display_mode mode;
int last_dpms;
@@ -141,11 +146,9 @@ enum drm_mode_status nv50_dp_mode_valid(struct drm_connector *,
unsigned *clock);
struct nouveau_connector *
-nv50_outp_get_new_connector(struct nouveau_encoder *outp,
- struct drm_atomic_state *state);
+nv50_outp_get_new_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp);
struct nouveau_connector *
-nv50_outp_get_old_connector(struct nouveau_encoder *outp,
- struct drm_atomic_state *state);
+nv50_outp_get_old_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp);
int nv50_mstm_detect(struct nouveau_encoder *encoder);
void nv50_mstm_remove(struct nv50_mstm *mstm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 24ec5339efb4..4fc0fa696461 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -396,7 +396,9 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
fb->width, fb->height, nvbo->offset, nvbo);
- vga_switcheroo_client_fb_set(dev->pdev, info);
+ if (dev_is_pci(dev->dev))
+ vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), info);
+
return 0;
out_unlock:
@@ -548,7 +550,7 @@ nouveau_fbcon_init(struct drm_device *dev)
int ret;
if (!dev->mode_config.num_crtc ||
- (dev->pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ (to_pci_dev(dev->dev)->class >> 8) != PCI_CLASS_DISPLAY_VGA)
return 0;
fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 2f16b5249283..347488685f74 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -30,9 +30,9 @@
struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
- int npages = nvbo->bo.num_pages;
- return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages);
+ return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages,
+ nvbo->bo.ttm->num_pages);
}
struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index a2e23fd4906c..1cf52635ea74 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -84,7 +84,7 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
if (!nvbe)
return NULL;
- if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags, caching)) {
+ if (ttm_sg_tt_init(&nvbe->ttm, bo, page_flags, caching)) {
kfree(nvbe);
return NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index c85dd8afa3c3..7c4b374b3eca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -87,18 +87,20 @@ nouveau_vga_init(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
bool runtime = nouveau_pmops_runtime();
+ struct pci_dev *pdev;
/* only relevant for PCI devices */
- if (!dev->pdev)
+ if (!dev_is_pci(dev->dev))
return;
+ pdev = to_pci_dev(dev->dev);
- vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
+ vga_client_register(pdev, dev, NULL, nouveau_vga_set_decode);
/* don't register Thunderbolt eGPU with vga_switcheroo */
- if (pci_is_thunderbolt_attached(dev->pdev))
+ if (pci_is_thunderbolt_attached(pdev))
return;
- vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
+ vga_switcheroo_register_client(pdev, &nouveau_switcheroo_ops, runtime);
if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain);
@@ -109,17 +111,19 @@ nouveau_vga_fini(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
bool runtime = nouveau_pmops_runtime();
+ struct pci_dev *pdev;
/* only relevant for PCI devices */
- if (!dev->pdev)
+ if (!dev_is_pci(dev->dev))
return;
+ pdev = to_pci_dev(dev->dev);
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ vga_client_register(pdev, NULL, NULL, NULL);
- if (pci_is_thunderbolt_attached(dev->pdev))
+ if (pci_is_thunderbolt_attached(pdev))
return;
- vga_switcheroo_unregister_client(dev->pdev);
+ vga_switcheroo_unregister_client(pdev);
if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
}
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 1253fdec712d..b1cd8d7dd87d 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -80,7 +80,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_chan *fctx;
struct ttm_resource *reg = &priv->bo->bo.mem;
u32 start = reg->start * PAGE_SIZE;
- u32 limit = start + reg->size - 1;
+ u32 limit = start + priv->bo->bo.base.size - 1;
int ret = 0;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 447238e3cbe7..1625826505f6 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -39,7 +39,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_chan *fctx;
struct ttm_resource *reg = &priv->bo->bo.mem;
u32 start = reg->start * PAGE_SIZE;
- u32 limit = start + reg->size - 1;
+ u32 limit = start + priv->bo->bo.base.size - 1;
int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index 8d0d30e08f57..529cb60d5efb 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -35,6 +35,7 @@ nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass,
struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
+ { GA102_DISP, -1 },
{ TU102_DISP, -1 },
{ GV100_DISP, -1 },
{ GP102_DISP, -1 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7851bec5f0e5..cdcc851e06f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1815,7 +1815,7 @@ nvf0_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1853,7 +1853,7 @@ nvf1_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1891,7 +1891,7 @@ nv106_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1929,7 +1929,7 @@ nv108_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1967,7 +1967,7 @@ nv117_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -2003,7 +2003,7 @@ nv118_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
+ .i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -2652,6 +2652,61 @@ nv168_chipset = {
.sec2 = tu102_sec2_new,
};
+static const struct nvkm_device_chip
+nv170_chipset = {
+ .name = "GA100",
+ .bar = tu102_bar_new,
+ .bios = nvkm_bios_new,
+ .devinit = ga100_devinit_new,
+ .fb = ga100_fb_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .mc = ga100_mc_new,
+ .mmu = tu102_mmu_new,
+ .pci = gp100_pci_new,
+ .timer = gk20a_timer_new,
+};
+
+static const struct nvkm_device_chip
+nv172_chipset = {
+ .name = "GA102",
+ .bar = tu102_bar_new,
+ .bios = nvkm_bios_new,
+ .devinit = ga100_devinit_new,
+ .fb = ga102_fb_new,
+ .gpio = ga102_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .mc = ga100_mc_new,
+ .mmu = tu102_mmu_new,
+ .pci = gp100_pci_new,
+ .timer = gk20a_timer_new,
+ .disp = ga102_disp_new,
+ .dma = gv100_dma_new,
+};
+
+static const struct nvkm_device_chip
+nv174_chipset = {
+ .name = "GA104",
+ .bar = tu102_bar_new,
+ .bios = nvkm_bios_new,
+ .devinit = ga100_devinit_new,
+ .fb = ga102_fb_new,
+ .gpio = ga102_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .mc = ga100_mc_new,
+ .mmu = tu102_mmu_new,
+ .pci = gp100_pci_new,
+ .timer = gk20a_timer_new,
+ .disp = ga102_disp_new,
+ .dma = gv100_dma_new,
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -3063,6 +3118,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x130: device->card_type = GP100; break;
case 0x140: device->card_type = GV100; break;
case 0x160: device->card_type = TU100; break;
+ case 0x170: device->card_type = GA100; break;
default:
break;
}
@@ -3160,10 +3216,23 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x166: device->chip = &nv166_chipset; break;
case 0x167: device->chip = &nv167_chipset; break;
case 0x168: device->chip = &nv168_chipset; break;
+ case 0x172: device->chip = &nv172_chipset; break;
+ case 0x174: device->chip = &nv174_chipset; break;
default:
- nvdev_error(device, "unknown chipset (%08x)\n", boot0);
- ret = -ENODEV;
- goto done;
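+ /* Chipsets without complete support (GA100 here) are only exposed behind this opt-in. */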
+ if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
+ switch (device->chipset) {
+ case 0x170: device->chip = &nv170_chipset; break;
+ default:
+ break;
+ }
+ }
+
+ if (!device->chip) {
+ nvdev_error(device, "unknown chipset (%08x)\n", boot0);
+ ret = -ENODEV;
+ goto done;
+ }
+ break;
}
nvdev_info(device, "NVIDIA %s (%08x)\n",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 03c6d9aef075..147894798786 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -176,6 +176,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
+ case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
default:
args->v0.family = 0;
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index cf075311cdd2..b03f043efe26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -17,6 +17,7 @@ nvkm-y += nvkm/engine/disp/gp100.o
nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/gv100.o
nvkm-y += nvkm/engine/disp/tu102.o
+nvkm-y += nvkm/engine/disp/ga102.o
nvkm-y += nvkm/engine/disp/vga.o
nvkm-y += nvkm/engine/disp/head.o
@@ -42,6 +43,7 @@ nvkm-y += nvkm/engine/disp/sorgm200.o
nvkm-y += nvkm/engine/disp/sorgp100.o
nvkm-y += nvkm/engine/disp/sorgv100.o
nvkm-y += nvkm/engine/disp/sortu102.o
+nvkm-y += nvkm/engine/disp/sorga102.o
nvkm-y += nvkm/engine/disp/outp.o
nvkm-y += nvkm/engine/disp/dp.o
@@ -75,6 +77,7 @@ nvkm-y += nvkm/engine/disp/rootgp100.o
nvkm-y += nvkm/engine/disp/rootgp102.o
nvkm-y += nvkm/engine/disp/rootgv100.o
nvkm-y += nvkm/engine/disp/roottu102.o
+nvkm-y += nvkm/engine/disp/rootga102.o
nvkm-y += nvkm/engine/disp/capsgv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 3800aeb507d0..55fbfe28c6dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -33,6 +33,12 @@
#include <nvif/event.h>
+/* IED scripts are no longer used by UEFI/RM from Ampere, but have been updated for
+ * the x86 option ROM. However, the relevant VBIOS table versions weren't modified,
+ * so we're unable to detect this in a nice way.
+ */
+#define AMPERE_IED_HACK(disp) ((disp)->engine.subdev.device->card_type >= GA100)
+
struct lt_state {
struct nvkm_dp *dp;
u8 stat[6];
@@ -238,6 +244,19 @@ nvkm_dp_train_links(struct nvkm_dp *dp)
dp->dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;
lt.pc2 = dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED;
+ if (AMPERE_IED_HACK(disp) && (lnkcmp = lt.dp->info.script[0])) {
+ /* Execute BeforeLinkTraining script from DP Info table. */
+ while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
+ lnkcmp += 3;
+ lnkcmp = nvbios_rd16(bios, lnkcmp + 1);
+
+ nvbios_init(&dp->outp.disp->engine.subdev, lnkcmp,
+ init.outp = &dp->outp.info;
+ init.or = ior->id;
+ init.link = ior->asy.link;
+ );
+ }
+
/* Set desired link configuration on the source. */
if ((lnkcmp = lt.dp->info.lnkcmp)) {
if (dp->version < 0x30) {
@@ -316,12 +335,14 @@ nvkm_dp_train_init(struct nvkm_dp *dp)
);
}
- /* Execute BeforeLinkTraining script from DP Info table. */
- nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
- init.outp = &dp->outp.info;
- init.or = dp->outp.ior->id;
- init.link = dp->outp.ior->asy.link;
- );
+ if (!AMPERE_IED_HACK(dp->outp.disp)) {
+ /* Execute BeforeLinkTraining script from DP Info table. */
+ nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
+ init.outp = &dp->outp.info;
+ init.or = dp->outp.ior->id;
+ init.link = dp->outp.ior->asy.link;
+ );
+ }
}
static const struct dp_rates {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
new file mode 100644
index 000000000000..aa2e5645fe36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+#include "head.h"
+#include "ior.h"
+#include "channv50.h"
+#include "rootnv50.h"
+
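+/* GA102 display engine: TU102 init sequence combined with the GV100 interrupt/supervisor paths. */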
+static const struct nv50_disp_func
+ga102_disp = {
+ .init = tu102_disp_init,
+ .fini = gv100_disp_fini,
+ .intr = gv100_disp_intr,
+ .uevent = &gv100_disp_chan_uevent,
+ .super = gv100_disp_super,
+ .root = &ga102_disp_root_oclass,
+ .wndw = { .cnt = gv100_disp_wndw_cnt },
+ .head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
+ .sor = { .cnt = gv100_sor_cnt, .new = ga102_sor_new },
+ .ramht_size = 0x2000,
+};
+
+int
+ga102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+ return nv50_disp_new_(&ga102_disp, device, index, pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 09f3038eff26..9f0bb7c6b010 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -150,6 +150,8 @@ void gv100_sor_dp_audio(struct nvkm_ior *, int, bool);
void gv100_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
void gv100_sor_dp_watermark(struct nvkm_ior *, int, u8);
+void tu102_sor_dp_vcpi(struct nvkm_ior *, int, u8, u8, u16, u16);
+
void g84_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gt215_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
@@ -207,4 +209,6 @@ int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
int gv100_sor_new(struct nvkm_disp *, int);
int tu102_sor_new(struct nvkm_disp *, int);
+
+int ga102_sor_new(struct nvkm_disp *, int);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index a677161c7f3a..db31b37752a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -86,6 +86,8 @@ void gv100_disp_intr(struct nv50_disp *);
void gv100_disp_super(struct work_struct *);
int gv100_disp_wndw_cnt(struct nvkm_disp *, unsigned long *);
+int tu102_disp_init(struct nv50_disp *);
+
void nv50_disp_dptmds_war_2(struct nv50_disp *, struct dcb_output *);
void nv50_disp_dptmds_war_3(struct nv50_disp *, struct dcb_output *);
void nv50_disp_update_sppll1(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c
new file mode 100644
index 000000000000..9af07c3cf9fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "rootnv50.h"
+#include "channv50.h"
+
+#include <nvif/class.h>
+
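+/* GA102 display root: exposes the GA102 user classes, backed by the existing GV100 channel code. */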
+static const struct nv50_disp_root_func
+ga102_disp_root = {
+ .user = {
+ {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
+ {{0,0,GA102_DISP_CURSOR }, gv100_disp_curs_new },
+ {{0,0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
+ {{0,0,GA102_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },
+ {{0,0,GA102_DISP_WINDOW_CHANNEL_DMA }, gv100_disp_wndw_new },
+ {}
+ },
+};
+
+static int
+ga102_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&ga102_disp_root, disp, oclass, data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+ga102_disp_root_oclass = {
+ .base.oclass = GA102_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = ga102_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 7070f5408d92..27bb170d0293 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -41,4 +41,5 @@ extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
extern const struct nvkm_disp_oclass gv100_disp_root_oclass;
extern const struct nvkm_disp_oclass tu102_disp_root_oclass;
+extern const struct nvkm_disp_oclass ga102_disp_root_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
new file mode 100644
index 000000000000..033827de9116
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+#include <subdev/timer.h>
+
+static int
+ga102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ const u32 soff = nv50_ior_base(sor);
+ const u32 loff = nv50_sor_link(sor);
+ u32 dpctrl = 0x00000000;
+ u32 clksor = 0x00000000;
+
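+ /* sor->dp.bw carries the DPCD LINK_BW_SET encoding (0x06/0x0a/0x14/0x1e = 1.62/2.7/5.4/8.1 Gbps). */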
+ switch (sor->dp.bw) {
+ case 0x06: clksor |= 0x00000000; break;
+ case 0x0a: clksor |= 0x00040000; break;
+ case 0x14: clksor |= 0x00080000; break;
+ case 0x1e: clksor |= 0x000c0000; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
+ if (sor->dp.mst)
+ dpctrl |= 0x40000000;
+ if (sor->dp.ef)
+ dpctrl |= 0x00004000;
+
+ nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
+
+ /*XXX*/
+ nvkm_msec(device, 40, NVKM_DELAY);
+ nvkm_mask(device, 0x612300 + soff, 0x00030000, 0x00010000);
+ nvkm_mask(device, 0x61c10c + loff, 0x00000003, 0x00000001);
+
+ nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
+ return 0;
+}
+
+static void
+ga102_sor_clock(struct nvkm_ior *sor)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ u32 div2 = 0;
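+ /* Presumably the SOR clock needs halving for high-speed (HDMI 2.0) TMDS links. */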
+ if (sor->asy.proto == TMDS) {
+ if (sor->tmds.high_speed)
+ div2 = 1;
+ }
+ nvkm_wr32(device, 0x00ec08 + (sor->id * 0x10), 0x00000000);
+ nvkm_wr32(device, 0x00ec04 + (sor->id * 0x10), div2);
+}
+
+static const struct nvkm_ior_func
+ga102_sor_hda = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = ga102_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = ga102_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = tu102_sor_dp_vcpi,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
+ },
+ .hda = {
+ .hpd = gf119_hda_hpd,
+ .eld = gf119_hda_eld,
+ .device_entry = gv100_hda_device_entry,
+ },
+};
+
+static const struct nvkm_ior_func
+ga102_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = ga102_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = ga102_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = tu102_sor_dp_vcpi,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
+ },
+};
+
+int
+ga102_sor_new(struct nvkm_disp *disp, int id)
+{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ u32 hda = nvkm_rd32(device, 0x08a15c);
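+ /* Each set bit appears to advertise HDA (audio) capability for the corresponding SOR. */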
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&ga102_sor_hda, disp, SOR, id);
+ return nvkm_ior_new_(&ga102_sor, disp, SOR, id);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
index 59865a934c4b..0cf9e8752d25 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
@@ -23,7 +23,7 @@
#include <subdev/timer.h>
-static void
+void
tu102_sor_dp_vcpi(struct nvkm_ior *sor, int head,
u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
index 883ae4151ff8..4c85d1d4fbd4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
@@ -28,7 +28,7 @@
#include <core/gpuobj.h>
#include <subdev/timer.h>
-static int
+int
tu102_disp_init(struct nv50_disp *disp)
{
struct nvkm_device *device = disp->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 5d4b695cab8e..c73b7eab776e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -36,19 +36,7 @@
#include <nvif/class.h>
#include <nvif/cl0080.h>
-struct gk104_fifo_engine_status {
- bool busy;
- bool faulted;
- bool chsw;
- bool save;
- bool load;
- struct {
- bool tsg;
- u32 id;
- } prev, next, *chan;
-};
-
-static void
+void
gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
struct gk104_fifo_engine_status *status)
{
@@ -95,7 +83,7 @@ gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
status->chan == &status->next ? "*" : " ");
}
-static int
+int
gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
void *argv, u32 argc, struct nvkm_object **pobject)
{
@@ -112,7 +100,7 @@ gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return -EINVAL;
}
-static int
+int
gk104_fifo_class_get(struct nvkm_fifo *base, int index,
struct nvkm_oclass *oclass)
{
@@ -134,14 +122,14 @@ gk104_fifo_class_get(struct nvkm_fifo *base, int index,
return c;
}
-static void
+void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
-static void
+void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
@@ -556,7 +544,7 @@ gk104_fifo_bind_reason[] = {
{}
};
-static void
+void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -627,7 +615,7 @@ gk104_fifo_intr_sched(struct gk104_fifo *fifo)
}
}
-static void
+void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -637,7 +625,7 @@ gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
nvkm_wr32(device, 0x00256c, stat);
}
-static void
+void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -680,7 +668,7 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
{}
};
-static void
+void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -729,7 +717,7 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
{}
};
-static void
+void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -750,7 +738,7 @@ gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}
-static void
+void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
@@ -763,7 +751,7 @@ gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
}
}
-static void
+void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
nvkm_fifo_uevent(&fifo->base);
@@ -861,7 +849,7 @@ gk104_fifo_intr(struct nvkm_fifo *base)
}
}
-static void
+void
gk104_fifo_fini(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
@@ -871,7 +859,7 @@ gk104_fifo_fini(struct nvkm_fifo *base)
nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}
-static int
+int
gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
{
struct gk104_fifo *fifo = gk104_fifo(base);
@@ -899,7 +887,7 @@ gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
}
}
-static int
+int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
@@ -974,7 +962,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}
-static void
+void
gk104_fifo_init(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
@@ -1006,7 +994,7 @@ gk104_fifo_init(struct nvkm_fifo *base)
nvkm_wr32(device, 0x002140, 0x7fffffff);
}
-static void *
+void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 6407a4a174cf..4398b340e514 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -87,11 +87,43 @@ struct gk104_fifo_func {
bool cgrp_force;
};
+struct gk104_fifo_engine_status {
+ bool busy;
+ bool faulted;
+ bool chsw;
+ bool save;
+ bool load;
+ struct {
+ bool tsg;
+ u32 id;
+ } prev, next, *chan;
+};
+
int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *,
int index, int nr, struct nvkm_fifo **);
void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
void gk104_fifo_runlist_update(struct gk104_fifo *, int runl);
+void gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
+ struct gk104_fifo_engine_status *status);
+void gk104_fifo_intr_bind(struct gk104_fifo *fifo);
+void gk104_fifo_intr_chsw(struct gk104_fifo *fifo);
+void gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo);
+void gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit);
+void gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit);
+void gk104_fifo_intr_runlist(struct gk104_fifo *fifo);
+void gk104_fifo_intr_engine(struct gk104_fifo *fifo);
+void *gk104_fifo_dtor(struct nvkm_fifo *base);
+int gk104_fifo_oneinit(struct nvkm_fifo *base);
+int gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data);
+void gk104_fifo_init(struct nvkm_fifo *base);
+void gk104_fifo_fini(struct nvkm_fifo *base);
+int gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *argv, u32 argc, struct nvkm_object **pobject);
+int gk104_fifo_class_get(struct nvkm_fifo *base, int index,
+ struct nvkm_oclass *oclass);
+void gk104_fifo_uevent_fini(struct nvkm_fifo *fifo);
+void gk104_fifo_uevent_init(struct nvkm_fifo *fifo);
extern const struct gk104_fifo_pbdma_func gk104_fifo_pbdma;
int gk104_fifo_pbdma_nr(struct gk104_fifo *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
index 005f3e1729b9..14e5b70e0255 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
@@ -24,7 +24,13 @@
#include "changk104.h"
#include "user.h"
+#include <core/client.h>
#include <core/gpuobj.h>
+#include <subdev/bar.h>
+#include <subdev/fault.h>
+#include <subdev/top.h>
+#include <subdev/timer.h>
+#include <engine/sw.h>
#include <nvif/class.h>
@@ -109,8 +115,364 @@ tu102_fifo = {
.cgrp_force = true,
};
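+/* Worker that resets faulted engines and resubmits the affected runlists outside IRQ context. */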
+static void
+tu102_fifo_recover_work(struct work_struct *w)
+{
+ struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_engine *engine;
+ unsigned long flags;
+ u32 engm, runm, todo;
+ int engn, runl;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ runm = fifo->recover.runm;
+ engm = fifo->recover.engm;
+ fifo->recover.engm = 0;
+ fifo->recover.runm = 0;
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+
+ nvkm_mask(device, 0x002630, runm, runm);
+
+ for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
+ if ((engine = fifo->engine[engn].engine)) {
+ nvkm_subdev_fini(&engine->subdev, false);
+ WARN_ON(nvkm_subdev_init(&engine->subdev));
+ }
+ }
+
+ for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
+ gk104_fifo_runlist_update(fifo, runl);
+
+ nvkm_mask(device, 0x002630, runm, 0x00000000);
+}
+
+static void tu102_fifo_recover_engn(struct gk104_fifo *fifo, int engn);
+
+static void
+tu102_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 runm = BIT(runl);
+
+ assert_spin_locked(&fifo->base.lock);
+ if (fifo->recover.runm & runm)
+ return;
+ fifo->recover.runm |= runm;
+
+ /* Block runlist to prevent channel assignment(s) from changing. */
+ nvkm_mask(device, 0x002630, runm, runm);
+
+ /* Schedule recovery. */
+ nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
+ schedule_work(&fifo->recover.work);
+}
+
+static struct gk104_fifo_chan *
+tu102_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
+{
+ struct gk104_fifo_chan *chan;
+ struct nvkm_fifo_cgrp *cgrp;
+
+ list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
+ if (chan->base.chid == chid) {
+ list_del_init(&chan->head);
+ return chan;
+ }
+ }
+
+ list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
+ if (cgrp->id == chid) {
+ chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
+ list_del_init(&chan->head);
+ if (!--cgrp->chan_nr)
+ list_del_init(&cgrp->head);
+ return chan;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+tu102_fifo_recover_chan(struct nvkm_fifo *base, int chid)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
+ const u32 runl = (stat & 0x000f0000) >> 16;
+ const bool used = (stat & 0x00000001);
+ unsigned long engn, engm = fifo->runlist[runl].engm;
+ struct gk104_fifo_chan *chan;
+
+ assert_spin_locked(&fifo->base.lock);
+ if (!used)
+ return;
+
+ /* Lookup SW state for channel, and mark it as dead. */
+ chan = tu102_fifo_recover_chid(fifo, runl, chid);
+ if (chan) {
+ chan->killed = true;
+ nvkm_fifo_kevent(&fifo->base, chid);
+ }
+
+ /* Disable channel. */
+ nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
+ nvkm_warn(subdev, "channel %d: killed\n", chid);
+
+ /* Block channel assignments from changing during recovery. */
+ tu102_fifo_recover_runl(fifo, runl);
+
+ /* Schedule recovery for any engines the channel is on. */
+ for_each_set_bit(engn, &engm, fifo->engine_nr) {
+ struct gk104_fifo_engine_status status;
+
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (!status.chan || status.chan->id != chid)
+ continue;
+ tu102_fifo_recover_engn(fifo, engn);
+ }
+}
+
+static void
+tu102_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 runl = fifo->engine[engn].runl;
+ const u32 engm = BIT(engn);
+ struct gk104_fifo_engine_status status;
+
+ assert_spin_locked(&fifo->base.lock);
+ if (fifo->recover.engm & engm)
+ return;
+ fifo->recover.engm |= engm;
+
+ /* Block channel assignments from changing during recovery. */
+ tu102_fifo_recover_runl(fifo, runl);
+
+ /* Determine which channel (if any) is currently on the engine. */
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (status.chan) {
+ /* The channel is no longer viable, kill it. */
+ tu102_fifo_recover_chan(&fifo->base, status.chan->id);
+ }
+
+ /* Preempt the runlist */
+ nvkm_wr32(device, 0x2638, BIT(runl));
+
+ /* Schedule recovery. */
+ nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
+ schedule_work(&fifo->recover.work);
+}
+
+static void
+tu102_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const struct nvkm_enum *er, *ee, *ec, *ea;
+ struct nvkm_engine *engine = NULL;
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ char ct[8] = "HUB/", en[16] = "";
+ int engn;
+
+ er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
+ ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
+ if (info->hub) {
+ ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client);
+ } else {
+ ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client);
+ snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
+ }
+ ea = nvkm_enum_find(fifo->func->fault.access, info->access);
+
+ if (ee && ee->data2) {
+ switch (ee->data2) {
+ case NVKM_SUBDEV_BAR:
+ nvkm_bar_bar1_reset(device);
+ break;
+ case NVKM_SUBDEV_INSTMEM:
+ nvkm_bar_bar2_reset(device);
+ break;
+ case NVKM_ENGINE_IFB:
+ nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
+ break;
+ default:
+ engine = nvkm_device_engine(device, ee->data2);
+ break;
+ }
+ }
+
+ if (ee == NULL) {
+ enum nvkm_devidx engidx = nvkm_top_fault(device, info->engine);
+
+ if (engidx < NVKM_SUBDEV_NR) {
+ const char *src = nvkm_subdev_name[engidx];
+ char *dst = en;
+
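+ /* Upper-case the subdev name into 'en' for the fault message below. */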
+ do {
+ *dst++ = toupper(*src++);
+ } while (*src);
+ engine = nvkm_device_engine(device, engidx);
+ }
+ } else {
+ snprintf(en, sizeof(en), "%s", ee->name);
+ }
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst);
+
+ nvkm_error(subdev,
+ "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
+ "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
+ info->access, ea ? ea->name : "", info->addr,
+ info->engine, ee ? ee->name : en,
+ info->client, ct, ec ? ec->name : "",
+ info->reason, er ? er->name : "", chan ? chan->chid : -1,
+ info->inst, chan ? chan->object.client->name : "unknown");
+
+ /* Kill the channel that caused the fault. */
+ if (chan)
+ tu102_fifo_recover_chan(&fifo->base, chan->chid);
+
+ /* Channel recovery will probably have already done this for the
+ * correct engine(s), but just in case we can't find the channel
+ * information...
+ */
+ for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
+ if (fifo->engine[engn].engine == engine) {
+ tu102_fifo_recover_engn(fifo, engn);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+static void
+tu102_fifo_intr_ctxsw_timeout(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ unsigned long flags, engm;
+ u32 engn;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+
+ engm = nvkm_rd32(device, 0x2a30);
+ nvkm_wr32(device, 0x2a30, engm);
+
+ for_each_set_bit(engn, &engm, 32)
+ tu102_fifo_recover_engn(fifo, engn);
+
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+static void
+tu102_fifo_intr_sched(struct gk104_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x00254c);
+ u32 code = intr & 0x000000ff;
+
+ nvkm_error(subdev, "SCHED_ERROR %02x\n", code);
+}
+
+static void
+tu102_fifo_intr(struct nvkm_fifo *base)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x002140);
+ u32 stat = nvkm_rd32(device, 0x002100) & mask;
+
+ if (stat & 0x00000001) {
+ gk104_fifo_intr_bind(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
+ if (stat & 0x00000002) {
+ tu102_fifo_intr_ctxsw_timeout(fifo);
+ stat &= ~0x00000002;
+ }
+
+ if (stat & 0x00000100) {
+ tu102_fifo_intr_sched(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x00010000) {
+ gk104_fifo_intr_chsw(fifo);
+ nvkm_wr32(device, 0x002100, 0x00010000);
+ stat &= ~0x00010000;
+ }
+
+ if (stat & 0x20000000) {
+ u32 mask = nvkm_rd32(device, 0x0025a0);
+
+ while (mask) {
+ u32 unit = __ffs(mask);
+
+ gk104_fifo_intr_pbdma_0(fifo, unit);
+ gk104_fifo_intr_pbdma_1(fifo, unit);
+ nvkm_wr32(device, 0x0025a0, (1 << unit));
+ mask &= ~(1 << unit);
+ }
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ gk104_fifo_intr_runlist(fifo);
+ stat &= ~0x40000000;
+ }
+
+ if (stat & 0x80000000) {
+ nvkm_wr32(device, 0x002100, 0x80000000);
+ gk104_fifo_intr_engine(fifo);
+ stat &= ~0x80000000;
+ }
+
+ if (stat) {
+ nvkm_error(subdev, "INTR %08x\n", stat);
+ nvkm_mask(device, 0x002140, stat, 0x00000000);
+ nvkm_wr32(device, 0x002100, stat);
+ }
+}
+
+static const struct nvkm_fifo_func
+tu102_fifo_ = {
+ .dtor = gk104_fifo_dtor,
+ .oneinit = gk104_fifo_oneinit,
+ .info = gk104_fifo_info,
+ .init = gk104_fifo_init,
+ .fini = gk104_fifo_fini,
+ .intr = tu102_fifo_intr,
+ .fault = tu102_fifo_fault,
+ .uevent_init = gk104_fifo_uevent_init,
+ .uevent_fini = gk104_fifo_uevent_fini,
+ .recover_chan = tu102_fifo_recover_chan,
+ .class_get = gk104_fifo_class_get,
+ .class_new = gk104_fifo_class_new,
+};
+
int
tu102_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&tu102_fifo, device, index, 4096, pfifo);
+ struct gk104_fifo *fifo;
+
+ if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+ fifo->func = &tu102_fifo;
+ INIT_WORK(&fifo->recover.work, tu102_fifo_recover_work);
+ *pfifo = &fifo->base;
+
+ return nvkm_fifo_ctor(&tu102_fifo_, device, index, 4096, &fifo->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 7deb81b6dbac..4b571cc6bc70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -75,7 +75,7 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
image.base, image.type, image.size);
- if (!shadow_fetch(bios, mthd, image.size)) {
+ if (!shadow_fetch(bios, mthd, image.base + image.size)) {
nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
index 3634cd0630b8..023ddc7c5399 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
@@ -64,6 +64,9 @@ pramin_init(struct nvkm_bios *bios, const char *name)
return NULL;
/* we can't get the bios image pointer without PDISP */
+ if (device->card_type >= GA100)
+ addr = device->chipset == 0x170; /*XXX: find the fuse reg for this */
+ else
if (device->card_type >= GM100)
addr = nvkm_rd32(device, 0x021c04);
else
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
index b3429371ed82..d1abb64841da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -15,3 +15,4 @@ nvkm-y += nvkm/subdev/devinit/gm107.o
nvkm-y += nvkm/subdev/devinit/gm200.o
nvkm-y += nvkm/subdev/devinit/gv100.o
nvkm-y += nvkm/subdev/devinit/tu102.o
+nvkm-y += nvkm/subdev/devinit/ga100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
new file mode 100644
index 000000000000..636a92128f6c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
+
+static int
+ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
+{
+ struct nvkm_subdev *subdev = &init->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvbios_pll info;
+ int head = type - PLL_VPLL0;
+ int N, fN, M, P;
+ int ret;
+
+ ret = nvbios_pll_parse(device->bios, type, &info);
+ if (ret)
+ return ret;
+
+ ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
+ if (ret < 0)
+ return ret;
+
+ switch (info.type) {
+ case PLL_VPLL0:
+ case PLL_VPLL1:
+ case PLL_VPLL2:
+ case PLL_VPLL3:
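+ /* Program the computed N/fN/M/P coefficients for this head's VPLL;
+  * the final write is presumably the enable (register layout assumed).
+  */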
+ nvkm_wr32(device, 0x00ef00 + (head * 0x40), 0x02080004);
+ nvkm_wr32(device, 0x00ef18 + (head * 0x40), (N << 16) | fN);
+ nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) | M);
+ nvkm_wr32(device, 0x00e9c0 + (head * 0x04), 0x00000001);
+ break;
+ default:
+ nvkm_warn(subdev, "%08x/%dkHz unimplemented\n", type, freq);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct nvkm_devinit_func
+ga100_devinit = {
+ .init = nv50_devinit_init,
+ .post = tu102_devinit_post,
+ .pll_set = ga100_devinit_pll_set,
+};
+
+int
+ga100_devinit_new(struct nvkm_device *device, int index, struct nvkm_devinit **pinit)
+{
+ return nv50_devinit_new_(&ga100_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index 94723352137a..05961e624264 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -19,4 +19,5 @@ void nvkm_devinit_ctor(const struct nvkm_devinit_func *, struct nvkm_device *,
int index, struct nvkm_devinit *);
int nv04_devinit_post(struct nvkm_devinit *, bool);
+int tu102_devinit_post(struct nvkm_devinit *, bool);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
index 397670e72fff..9a469bf482f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
@@ -65,7 +65,7 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
return ret;
}
-static int
+int
tu102_devinit_post(struct nvkm_devinit *base, bool post)
{
struct nv50_devinit *init = nv50_devinit(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
index 45a6a68b9f48..f080051b0c65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
@@ -22,6 +22,7 @@
#include "priv.h"
#include <core/memory.h>
+#include <subdev/mc.h>
#include <subdev/mmu.h>
#include <engine/fifo.h>
@@ -34,6 +35,9 @@ tu102_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
* which don't appear to actually work anymore, but newer
* versions of RM don't appear to touch anything at all..
*/
+ struct nvkm_device *device = buffer->fault->subdev.device;
+
+ nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable);
}
static void
@@ -41,6 +45,11 @@ tu102_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
const u32 foff = buffer->id * 0x20;
+
+ /* Disable the fault interrupts */
+ nvkm_wr32(device, 0xb81408, 0x1);
+ nvkm_wr32(device, 0xb81410, 0x10);
+
nvkm_mask(device, 0xb83010 + foff, 0x80000000, 0x00000000);
}
@@ -50,6 +59,10 @@ tu102_fault_buffer_init(struct nvkm_fault_buffer *buffer)
struct nvkm_device *device = buffer->fault->subdev.device;
const u32 foff = buffer->id * 0x20;
+ /* Enable the fault interrupts */
+ nvkm_wr32(device, 0xb81208, 0x1);
+ nvkm_wr32(device, 0xb81210, 0x10);
+
nvkm_mask(device, 0xb83010 + foff, 0xc0000000, 0x40000000);
nvkm_wr32(device, 0xb83004 + foff, upper_32_bits(buffer->addr));
nvkm_wr32(device, 0xb83000 + foff, lower_32_bits(buffer->addr));
@@ -109,14 +122,20 @@ tu102_fault_intr(struct nvkm_fault *fault)
}
if (stat & 0x00000200) {
+ /* Clear the associated interrupt flag */
+ nvkm_wr32(device, 0xb81010, 0x10);
+
if (fault->buffer[0]) {
nvkm_event_send(&fault->event, 1, 0, NULL, 0);
stat &= ~0x00000200;
}
}
- /*XXX: guess, can't confirm until we get fw... */
+ /* Replayable MMU fault */
if (stat & 0x00000100) {
+ /* Clear the associated interrupt flag */
+ nvkm_wr32(device, 0xb81008, 0x1);
+
if (fault->buffer[1]) {
nvkm_event_send(&fault->event, 1, 1, NULL, 0);
stat &= ~0x00000100;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 43a42159a3d0..5d0bab8ecb43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -32,6 +32,8 @@ nvkm-y += nvkm/subdev/fb/gp100.o
nvkm-y += nvkm/subdev/fb/gp102.o
nvkm-y += nvkm/subdev/fb/gp10b.o
nvkm-y += nvkm/subdev/fb/gv100.o
+nvkm-y += nvkm/subdev/fb/ga100.o
+nvkm-y += nvkm/subdev/fb/ga102.o
nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
@@ -52,6 +54,7 @@ nvkm-y += nvkm/subdev/fb/ramgk104.o
nvkm-y += nvkm/subdev/fb/ramgm107.o
nvkm-y += nvkm/subdev/fb/ramgm200.o
nvkm-y += nvkm/subdev/fb/ramgp100.o
+nvkm-y += nvkm/subdev/fb/ramga102.o
nvkm-y += nvkm/subdev/fb/sddr2.o
nvkm-y += nvkm/subdev/fb/sddr3.o
nvkm-y += nvkm/subdev/fb/gddr3.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
new file mode 100644
index 000000000000..bf82686851cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+static const struct nvkm_fb_func
+ga100_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gp100_fb_init,
+ .init_page = gv100_fb_init_page,
+ .init_unkn = gp100_fb_init_unkn,
+ .ram_new = gp100_ram_new,
+ .default_bigpage = 16,
+};
+
+int
+ga100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gp102_fb_new_(&ga100_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
new file mode 100644
index 000000000000..bcecf84a6e67
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+static const struct nvkm_fb_func
+ga102_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gp100_fb_init,
+ .init_page = gv100_fb_init_page,
+ .init_unkn = gp100_fb_init_unkn,
+ .ram_new = ga102_ram_new,
+ .default_bigpage = 16,
+};
+
+int
+ga102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gp102_fb_new_(&ga102_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 10ff5d053f7e..feda86a5fba8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -22,7 +22,7 @@
#include "gf100.h"
#include "ram.h"
-static int
+int
gv100_fb_init_page(struct nvkm_fb *fb)
{
return (fb->page == 16) ? 0 : -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 5be9c563350d..66932ac10d15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -82,4 +82,6 @@ int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int,
struct nvkm_fb **);
bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
int gp102_fb_vpr_scrub(struct nvkm_fb *);
+
+int gv100_fb_init_page(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index d723a9b4e3c4..ea7d66f3dd82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -70,4 +70,5 @@ int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm200_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int ga102_ram_new(struct nvkm_fb *, struct nvkm_ram **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c
new file mode 100644
index 000000000000..298c136cefe0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ram.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/rammap.h>
+
+static const struct nvkm_ram_func
+ga102_ram = {
+};
+
+int
+ga102_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
+ u32 size = nvkm_rd32(device, 0x1183a4);
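+ /* 0x1183a4 appears to report the VRAM size in MiB; shifted to bytes below. */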
+
+ return nvkm_ram_new_(&ga102_ram, fb, type, (u64)size << 20, pram);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
index b2ad5922a1c2..efbbaa080de5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
@@ -5,3 +5,4 @@ nvkm-y += nvkm/subdev/gpio/nv50.o
nvkm-y += nvkm/subdev/gpio/g94.o
nvkm-y += nvkm/subdev/gpio/gf119.o
nvkm-y += nvkm/subdev/gpio/gk104.o
+nvkm-y += nvkm/subdev/gpio/ga102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
new file mode 100644
index 000000000000..62c791baf400
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+ga102_gpio_reset(struct nvkm_gpio *gpio, u8 match)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ struct nvkm_bios *bios = device->bios;
+ u8 ver, len;
+ u16 entry;
+ int ent = -1;
+
+ while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
+ u32 data = nvbios_rd32(bios, entry);
+ u8 line = (data & 0x0000003f);
+ u8 defs = !!(data & 0x00000080);
+ u8 func = (data & 0x0000ff00) >> 8;
+ u8 unk0 = (data & 0x00ff0000) >> 16;
+ u8 unk1 = (data & 0x1f000000) >> 24;
+
+ if ( func == DCB_GPIO_UNUSED ||
+ (match != DCB_GPIO_UNUSED && match != func))
+ continue;
+
+ nvkm_gpio_set(gpio, 0, func, line, defs);
+
+ nvkm_mask(device, 0x021200 + (line * 4), 0xff, unk0);
+ if (unk1--)
+ nvkm_mask(device, 0x00d740 + (unk1 * 4), 0xff, line);
+ }
+}
+
+static int
+ga102_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 data = ((dir ^ 1) << 13) | (out << 12);
+ nvkm_mask(device, 0x021200 + (line * 4), 0x00003000, data);
+ nvkm_mask(device, 0x00d604, 0x00000001, 0x00000001); /* update? */
+ return 0;
+}
+
+static int
+ga102_gpio_sense(struct nvkm_gpio *gpio, int line)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ return !!(nvkm_rd32(device, 0x021200 + (line * 4)) & 0x00004000);
+}
+
+static void
+ga102_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 intr0 = nvkm_rd32(device, 0x021640);
+ u32 intr1 = nvkm_rd32(device, 0x02164c);
+ u32 stat0 = nvkm_rd32(device, 0x021648) & intr0;
+ u32 stat1 = nvkm_rd32(device, 0x021654) & intr1;
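+ /* Each register pair appears to cover 16 GPIO lines, with hi-level
+  * status in the low half and lo-level status in the high half; repack
+  * into the 32-bit lo/hi masks the core expects.
+  */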
+ *lo = (stat1 & 0xffff0000) | (stat0 >> 16);
+ *hi = (stat1 << 16) | (stat0 & 0x0000ffff);
+ nvkm_wr32(device, 0x021640, intr0);
+ nvkm_wr32(device, 0x02164c, intr1);
+}
+
+static void
+ga102_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
+{
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 inte0 = nvkm_rd32(device, 0x021648);
+ u32 inte1 = nvkm_rd32(device, 0x021654);
+ if (type & NVKM_GPIO_LO)
+ inte0 = (inte0 & ~(mask << 16)) | (data << 16);
+ if (type & NVKM_GPIO_HI)
+ inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
+ mask >>= 16;
+ data >>= 16;
+ if (type & NVKM_GPIO_LO)
+ inte1 = (inte1 & ~(mask << 16)) | (data << 16);
+ if (type & NVKM_GPIO_HI)
+ inte1 = (inte1 & ~mask) | data;
+ nvkm_wr32(device, 0x021648, inte0);
+ nvkm_wr32(device, 0x021654, inte1);
+}
+
+static const struct nvkm_gpio_func
+ga102_gpio = {
+ .lines = 32,
+ .intr_stat = ga102_gpio_intr_stat,
+ .intr_mask = ga102_gpio_intr_mask,
+ .drive = ga102_gpio_drive,
+ .sense = ga102_gpio_sense,
+ .reset = ga102_gpio_reset,
+};
+
+int
+ga102_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+ return nvkm_gpio_new_(&ga102_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index 723d0284caef..819703913a00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -7,6 +7,7 @@ nvkm-y += nvkm/subdev/i2c/g94.o
nvkm-y += nvkm/subdev/i2c/gf117.o
nvkm-y += nvkm/subdev/i2c/gf119.o
nvkm-y += nvkm/subdev/i2c/gk104.o
+nvkm-y += nvkm/subdev/i2c/gk110.o
nvkm-y += nvkm/subdev/i2c/gm200.o
nvkm-y += nvkm/subdev/i2c/pad.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 30b48896965e..f920eabf8628 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -3,6 +3,13 @@
#define __NVKM_I2C_AUX_H__
#include "pad.h"
+static inline void
+nvkm_i2c_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+ if (i2c->func->aux_autodpcd)
+ i2c->func->aux_autodpcd(i2c, aux, enable);
+}
+
struct nvkm_i2c_aux_func {
bool address_only;
int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index db7769cb33eb..47068f6f9c55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -77,7 +77,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
struct g94_i2c_aux *aux = g94_i2c_aux(obj);
- struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+ struct nvkm_i2c *i2c = aux->base.pad->i2c;
+ struct nvkm_device *device = i2c->subdev.device;
const u32 base = aux->ch * 0x50;
u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
@@ -96,6 +97,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
+
if (!(type & 1)) {
memcpy(xbuf, data, *size);
for (i = 0; i < 16; i += 4) {
@@ -128,7 +131,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (!timeout--) {
AUX_ERR(&aux->base, "timeout %08x", ctrl);
ret = -EIO;
- goto out;
+ goto out_err;
}
} while (ctrl & 0x00010000);
ret = 0;
@@ -154,7 +157,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
memcpy(data, xbuf, *size);
*size = stat & 0x0000001f;
}
-
+out_err:
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
out:
g94_i2c_aux_fini(aux);
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index edb6148cbca0..8bd1d442e465 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -33,7 +33,7 @@ static void
gm200_i2c_aux_fini(struct gm200_i2c_aux *aux)
{
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000);
}
static int
@@ -54,10 +54,10 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
return -EBUSY;
}
- } while (ctrl & 0x03010000);
+ } while (ctrl & 0x07010000);
/* set some magic, and wait up to 1ms for it to appear */
- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq);
timeout = 1000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
@@ -67,7 +67,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
gm200_i2c_aux_fini(aux);
return -EBUSY;
}
- } while ((ctrl & 0x03000000) != urep);
+ } while ((ctrl & 0x07000000) != urep);
return 0;
}
@@ -77,7 +77,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
- struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+ struct nvkm_i2c *i2c = aux->base.pad->i2c;
+ struct nvkm_device *device = i2c->subdev.device;
const u32 base = aux->ch * 0x50;
u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
@@ -96,6 +97,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
+
if (!(type & 1)) {
memcpy(xbuf, data, *size);
for (i = 0; i < 16; i += 4) {
@@ -128,7 +131,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (!timeout--) {
AUX_ERR(&aux->base, "timeout %08x", ctrl);
ret = -EIO;
- goto out;
+ goto out_err;
}
} while (ctrl & 0x00010000);
ret = 0;
@@ -155,6 +158,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
*size = stat & 0x0000001f;
}
+out_err:
+ nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
out:
gm200_i2c_aux_fini(aux);
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
new file mode 100644
index 000000000000..8e3bfa1af52a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "pad.h"
+
+static void
+gk110_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+ nvkm_mask(i2c->subdev.device, 0x00e4f8 + (aux * 0x50), 0x00010000, enable << 16);
+}
+
+static const struct nvkm_i2c_func
+gk110_i2c = {
+ .pad_x_new = gf119_i2c_pad_x_new,
+ .pad_s_new = gf119_i2c_pad_s_new,
+ .aux = 4,
+ .aux_stat = gk104_aux_stat,
+ .aux_mask = gk104_aux_mask,
+ .aux_autodpcd = gk110_aux_autodpcd,
+};
+
+int
+gk110_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+ return nvkm_i2c_new_(&gk110_i2c, device, index, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
index a23c5f315221..7b2375bff8a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
@@ -24,6 +24,12 @@
#include "priv.h"
#include "pad.h"
+static void
+gm200_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+ nvkm_mask(i2c->subdev.device, 0x00d968 + (aux * 0x50), 0x00010000, enable << 16);
+}
+
static const struct nvkm_i2c_func
gm200_i2c = {
.pad_x_new = gf119_i2c_pad_x_new,
@@ -31,6 +37,7 @@ gm200_i2c = {
.aux = 8,
.aux_stat = gk104_aux_stat,
.aux_mask = gk104_aux_mask,
+ .aux_autodpcd = gm200_aux_autodpcd,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
index 461016814f4f..44b7bb7d4777 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_I2C_PAD_H__
#define __NVKM_I2C_PAD_H__
-#include <subdev/i2c.h>
+#include "priv.h"
struct nvkm_i2c_pad {
const struct nvkm_i2c_pad_func *func;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
index bd86bc298ebe..e35f6036fcfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
@@ -23,6 +23,10 @@ struct nvkm_i2c_func {
/* mask on/off interrupt types for a given set of auxch
*/
void (*aux_mask)(struct nvkm_i2c *, u32, u32, u32);
+
+ /* enable/disable HW-initiated DPCD reads
+ */
+ void (*aux_autodpcd)(struct nvkm_i2c *, int aux, bool enable);
};
void g94_aux_stat(struct nvkm_i2c *, u32 *, u32 *, u32 *, u32 *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index 2340040942c9..1115376bc85f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
+#include <subdev/timer.h>
static void
gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
@@ -42,7 +42,6 @@ gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
@@ -53,7 +52,6 @@ gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
}
void
@@ -90,6 +88,12 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
intr1 &= ~stat;
}
}
+
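+ /* Acknowledge the privring interrupt the same way RM appears to, and
+  * wait for the operation to complete.
+  */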
+ nvkm_mask(device, 0x121c4c, 0x0000003f, 0x00000002);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x121c4c) & 0x0000003f))
+ break;
+ );
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index f3915f85838e..22e487b493ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
+#include <subdev/timer.h>
static void
gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
}
static void
@@ -42,7 +42,6 @@ gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
}
static void
@@ -53,7 +52,6 @@ gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
}
void
@@ -90,6 +88,12 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
intr1 &= ~stat;
}
}
+
+ nvkm_mask(device, 0x12004c, 0x0000003f, 0x00000002);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
+ break;
+ );
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 2585ef07532a..ac2b34e9ac6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -14,3 +14,4 @@ nvkm-y += nvkm/subdev/mc/gk20a.o
nvkm-y += nvkm/subdev/mc/gp100.o
nvkm-y += nvkm/subdev/mc/gp10b.o
nvkm-y += nvkm/subdev/mc/tu102.o
+nvkm-y += nvkm/subdev/mc/ga100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 0e57ab2a709f..09f669ac6630 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -108,9 +108,6 @@ nvkm_mc_intr(struct nvkm_device *device, bool *handled)
if (stat)
nvkm_error(&mc->subdev, "intr %08x\n", stat);
*handled = intr != 0;
-
- if (mc->func->intr_hack)
- mc->func->intr_hack(mc, handled);
}
static u32
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
new file mode 100644
index 000000000000..967eb3af11eb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+ga100_mc_intr_unarm(struct nvkm_mc *mc)
+{
+ nvkm_wr32(mc->subdev.device, 0xb81610, 0x00000004);
+}
+
+static void
+ga100_mc_intr_rearm(struct nvkm_mc *mc)
+{
+ nvkm_wr32(mc->subdev.device, 0xb81608, 0x00000004);
+}
+
+static void
+ga100_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 intr)
+{
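+ /* 0xb81210/0xb81410 appear to be leaf interrupt enable set/clear
+  * registers: enable the requested bits, disable the remaining masked bits.
+  */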
+ nvkm_wr32(mc->subdev.device, 0xb81210, mask & intr );
+ nvkm_wr32(mc->subdev.device, 0xb81410, mask & ~(mask & intr));
+}
+
+static u32
+ga100_mc_intr_stat(struct nvkm_mc *mc)
+{
+ u32 intr_top = nvkm_rd32(mc->subdev.device, 0xb81600), intr = 0x00000000;
+ if (intr_top & 0x00000004)
+ intr = nvkm_mask(mc->subdev.device, 0xb81010, 0x00000000, 0x00000000);
+ return intr;
+}
+
+static void
+ga100_mc_init(struct nvkm_mc *mc)
+{
+ nv50_mc_init(mc);
+ nvkm_wr32(mc->subdev.device, 0xb81210, 0xffffffff);
+}
+
+static const struct nvkm_mc_func
+ga100_mc = {
+ .init = ga100_mc_init,
+ .intr = gp100_mc_intr,
+ .intr_unarm = ga100_mc_intr_unarm,
+ .intr_rearm = ga100_mc_intr_rearm,
+ .intr_mask = ga100_mc_intr_mask,
+ .intr_stat = ga100_mc_intr_stat,
+ .reset = gk104_mc_reset,
+};
+
+int
+ga100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return nvkm_mc_new_(&ga100_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index 4aab753a6040..0d01b2c419ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -26,7 +26,6 @@ struct nvkm_mc_func {
void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat);
/* retrieve pending interrupt mask (NV_PMC_INTR) */
u32 (*intr_stat)(struct nvkm_mc *);
- void (*intr_hack)(struct nvkm_mc *, bool *handled);
const struct nvkm_mc_map *reset;
void (*unk260)(struct nvkm_mc *, u32);
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
index d098c44a4fcb..af0afd1ad6ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
@@ -19,37 +19,118 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#define tu102_mc(p) container_of((p), struct tu102_mc, base)
#include "priv.h"
+struct tu102_mc {
+ struct nvkm_mc base;
+ spinlock_t lock;
+ bool intr;
+ u32 mask;
+};
+
static void
-tu102_mc_intr_hack(struct nvkm_mc *mc, bool *handled)
+tu102_mc_intr_update(struct tu102_mc *mc)
{
- struct nvkm_device *device = mc->subdev.device;
- u32 stat = nvkm_rd32(device, 0xb81010);
- if (stat & 0x00000050) {
- struct nvkm_subdev *subdev =
- nvkm_device_subdev(device, NVKM_SUBDEV_FAULT);
- nvkm_wr32(device, 0xb81010, stat & 0x00000050);
- if (subdev)
- nvkm_subdev_intr(subdev);
- *handled = true;
+ struct nvkm_device *device = mc->base.subdev.device;
+ u32 mask = mc->intr ? mc->mask : 0, i;
+
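+ /* Apply the cached mask via the PMC INTR_EN clear/set pairs for both
+  * interrupt vectors, then arm or disarm the new top-level fault vector
+  * to match the state of the FAULT bit (0x00000200).
+  */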
+ for (i = 0; i < 2; i++) {
+ nvkm_wr32(device, 0x000180 + (i * 0x04), ~mask);
+ nvkm_wr32(device, 0x000160 + (i * 0x04), mask);
}
+
+ if (mask & 0x00000200)
+ nvkm_wr32(device, 0xb81608, 0x6);
+ else
+ nvkm_wr32(device, 0xb81610, 0x6);
+}
+
+void
+tu102_mc_intr_unarm(struct nvkm_mc *base)
+{
+ struct tu102_mc *mc = tu102_mc(base);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->intr = false;
+ tu102_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+void
+tu102_mc_intr_rearm(struct nvkm_mc *base)
+{
+ struct tu102_mc *mc = tu102_mc(base);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->intr = true;
+ tu102_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+void
+tu102_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
+{
+ struct tu102_mc *mc = tu102_mc(base);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->mask = (mc->mask & ~mask) | intr;
+ tu102_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static u32
+tu102_mc_intr_stat(struct nvkm_mc *mc)
+{
+ struct nvkm_device *device = mc->subdev.device;
+ u32 intr0 = nvkm_rd32(device, 0x000100);
+ u32 intr1 = nvkm_rd32(device, 0x000104);
+ u32 intr_top = nvkm_rd32(device, 0xb81600);
+
+ /* Turing and above route the MMU fault interrupts via a different
+ * interrupt tree with different control registers. For the moment remap
+ * them back to the old PMC vector.
+ */
+ if (intr_top & 0x00000006)
+ intr0 |= 0x00000200;
+
+ return intr0 | intr1;
}
+
static const struct nvkm_mc_func
tu102_mc = {
.init = nv50_mc_init,
.intr = gp100_mc_intr,
- .intr_unarm = gp100_mc_intr_unarm,
- .intr_rearm = gp100_mc_intr_rearm,
- .intr_mask = gp100_mc_intr_mask,
- .intr_stat = gf100_mc_intr_stat,
- .intr_hack = tu102_mc_intr_hack,
+ .intr_unarm = tu102_mc_intr_unarm,
+ .intr_rearm = tu102_mc_intr_rearm,
+ .intr_mask = tu102_mc_intr_mask,
+ .intr_stat = tu102_mc_intr_stat,
.reset = gk104_mc_reset,
};
int
+tu102_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
+ int index, struct nvkm_mc **pmc)
+{
+ struct tu102_mc *mc;
+
+ if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_mc_ctor(func, device, index, &mc->base);
+ *pmc = &mc->base;
+
+ spin_lock_init(&mc->lock);
+ mc->intr = false;
+ mc->mask = 0x7fffffff;
+ return 0;
+}
+
+int
tu102_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
{
- return gp100_mc_new_(&tu102_mc, device, index, pmc);
+ return tu102_mc_new_(&tu102_mc, device, index, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index de91e9a26172..6d5212ae2fd5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -316,9 +316,9 @@ nvkm_mmu_vram(struct nvkm_mmu *mmu)
{
struct nvkm_device *device = mmu->subdev.device;
struct nvkm_mm *mm = &device->fb->ram->vram;
- const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
- const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
- const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+ const u64 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+ const u64 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+ const u64 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
u8 heap = NVKM_MEM_VRAM;
int heapM, heapN, heapU;
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 5417e7a47072..e7281da5bc6a 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -5,13 +5,129 @@ config DRM_OMAP
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
select OMAP2_DSS
select DRM_KMS_HELPER
+ select VIDEOMODE_HELPERS
+ select HDMI
default n
help
DRM display driver for OMAP2/3/4 based boards.
if DRM_OMAP
-source "drivers/gpu/drm/omapdrm/dss/Kconfig"
-source "drivers/gpu/drm/omapdrm/displays/Kconfig"
+config OMAP2_DSS_DEBUG
+ bool "Debug support"
+ default n
+ help
+ This enables printing of debug messages. Alternatively, debug messages
+ can be enabled by setting CONFIG_DYNAMIC_DEBUG and then setting the
+ appropriate flags in <debugfs>/dynamic_debug/control.
+
+config OMAP2_DSS_DEBUGFS
+ bool "Debugfs filesystem support"
+ depends on DEBUG_FS
+ default n
+ help
+ This enables debugfs for OMAPDSS at <debugfs>/omapdss. It allows
+ querying the clock and register configuration of dss, dispc, dsi,
+ hdmi and rfbi.
+
+config OMAP2_DSS_COLLECT_IRQ_STATS
+ bool "Collect DSS IRQ statistics"
+ depends on OMAP2_DSS_DEBUGFS
+ default n
+ help
+ Collect DSS IRQ statistics, printable via debugfs.
+
+ The statistics can be found in
+ <debugfs>/omapdss/dispc_irq for DISPC interrupts, and
+ <debugfs>/omapdss/dsi_irq for DSI interrupts.
+
+config OMAP2_DSS_DPI
+ bool "DPI support"
+ default y
+ help
+ DPI Interface. This is the Parallel Display Interface.
+
+config OMAP2_DSS_VENC
+ bool "VENC support"
+ default y
+ help
+ OMAP Video Encoder support for S-Video and composite TV-out.
+
+config OMAP2_DSS_HDMI_COMMON
+ bool
+
+config OMAP4_DSS_HDMI
+ bool "HDMI support for OMAP4"
+ default y
+ select OMAP2_DSS_HDMI_COMMON
+ help
+ HDMI support for OMAP4 based SoCs.
+
+config OMAP4_DSS_HDMI_CEC
+ bool "Enable HDMI CEC support for OMAP4"
+ depends on OMAP4_DSS_HDMI
+ select CEC_CORE
+ default y
+ help
+ When selected, the HDMI transmitter will support the CEC feature.
+
+config OMAP5_DSS_HDMI
+ bool "HDMI support for OMAP5"
+ default n
+ select OMAP2_DSS_HDMI_COMMON
+ help
+ HDMI Interface for OMAP5 and similar cores. This adds the High
+ Definition Multimedia Interface. See http://www.hdmi.org/ for the
+ HDMI specification.
+
+config OMAP2_DSS_SDI
+ bool "SDI support"
+ default n
+ help
+ SDI (Serial Display Interface) support.
+
+ SDI is a high speed one-way display serial bus between the host
+ processor and a display.
+
+config OMAP2_DSS_DSI
+ bool "DSI support"
+ default n
+ select DRM_MIPI_DSI
+ help
+ MIPI DSI (Display Serial Interface) support.
+
+ DSI is a high speed half-duplex serial interface between the host
+ processor and a peripheral, such as a display or a framebuffer chip.
+
+ See http://www.mipi.org/ for DSI specifications.
+
+config OMAP2_DSS_MIN_FCK_PER_PCK
+ int "Minimum FCK/PCK ratio (for scaling)"
+ range 0 32
+ default 0
+ help
+ This can be used to adjust the minimum FCK/PCK ratio.
+
+ With this you can make sure that the DISPC FCK is at least
+ n x PCK. Video plane scaling requires a higher FCK than
+ normal.
+
+ If this is set to 0, there's no extra constraint on the
+ DISPC FCK. However, the FCK will at minimum be
+ 2xPCK (if active matrix) or 3xPCK (if passive matrix).
+
+ Max FCK is 173MHz, so this doesn't work if your PCK
+ is very high.
+
+config OMAP2_DSS_SLEEP_AFTER_VENC_RESET
+ bool "Sleep 20ms after VENC reset"
+ default y
+ help
+ There is a 20ms sleep after VENC reset which appears to be needed
+ for the reset to complete reliably. The reason for the bug is
+ unclear, and it's also unclear on which platforms this happens.
+
+ This option enables the sleep, and is enabled by default. You can
+ disable the sleep if it doesn't cause problems on your platform.
endif
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index f115253115c5..21e8277ff88f 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -4,16 +4,12 @@
# Direct Rendering Infrastructure (DRI)
#
-obj-y += dss/
-obj-y += displays/
-
omapdrm-y := omap_drv.o \
omap_irq.o \
omap_debugfs.o \
omap_crtc.o \
omap_plane.o \
omap_encoder.o \
- omap_connector.o \
omap_fb.o \
omap_gem.o \
omap_gem_dmabuf.o \
@@ -22,4 +18,17 @@ omapdrm-y := omap_drv.o \
omapdrm-$(CONFIG_DRM_FBDEV_EMULATION) += omap_fbdev.o
-obj-$(CONFIG_DRM_OMAP) += omapdrm.o
+omapdrm-y += dss/base.o dss/output.o dss/dss.o dss/dispc.o \
+ dss/dispc_coefs.o dss/pll.o dss/video-pll.o
+omapdrm-$(CONFIG_OMAP2_DSS_DPI) += dss/dpi.o
+omapdrm-$(CONFIG_OMAP2_DSS_VENC) += dss/venc.o
+omapdrm-$(CONFIG_OMAP2_DSS_SDI) += dss/sdi.o
+omapdrm-$(CONFIG_OMAP2_DSS_DSI) += dss/dsi.o
+omapdrm-$(CONFIG_OMAP2_DSS_HDMI_COMMON) += dss/hdmi_common.o dss/hdmi_wp.o \
+ dss/hdmi_pll.o dss/hdmi_phy.o
+omapdrm-$(CONFIG_OMAP4_DSS_HDMI) += dss/hdmi4.o dss/hdmi4_core.o
+omapdrm-$(CONFIG_OMAP4_DSS_HDMI_CEC) += dss/hdmi4_cec.o
+omapdrm-$(CONFIG_OMAP5_DSS_HDMI) += dss/hdmi5.o dss/hdmi5_core.o
+ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
deleted file mode 100644
index f2be594c7eff..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menu "OMAPDRM External Display Device Drivers"
-
-config DRM_OMAP_PANEL_DSI_CM
- tristate "Generic DSI Command Mode Panel"
- depends on BACKLIGHT_CLASS_DEVICE
- help
- Driver for generic DSI command mode panels.
-
-endmenu
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
deleted file mode 100644
index 488ddf153613..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
deleted file mode 100644
index e39ce0c0c9a9..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ /dev/null
@@ -1,1385 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Generic DSI Command Mode panel driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
- * Author: Tomi Valkeinen <[email protected]>
- */
-
-/* #define DEBUG */
-
-#include <linux/backlight.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/of_device.h>
-#include <linux/regulator/consumer.h>
-
-#include <drm/drm_connector.h>
-
-#include <video/mipi_display.h>
-#include <video/of_display_timing.h>
-
-#include "../dss/omapdss.h"
-
-/* DSI Virtual channel. Hardcoded for now. */
-#define TCH 0
-
-#define DCS_READ_NUM_ERRORS 0x05
-#define DCS_BRIGHTNESS 0x51
-#define DCS_CTRL_DISPLAY 0x53
-#define DCS_GET_ID1 0xda
-#define DCS_GET_ID2 0xdb
-#define DCS_GET_ID3 0xdc
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
- struct omap_dss_device *src;
-
- struct videomode vm;
-
- struct platform_device *pdev;
-
- struct mutex lock;
-
- struct backlight_device *bldev;
- struct backlight_device *extbldev;
-
- unsigned long hw_guard_end; /* next value of jiffies when we can
- * issue the next sleep in/out command
- */
- unsigned long hw_guard_wait; /* max guard time in jiffies */
-
- /* panel HW configuration from DT or platform data */
- struct gpio_desc *reset_gpio;
- struct gpio_desc *ext_te_gpio;
-
- struct regulator *vpnl;
- struct regulator *vddi;
-
- bool use_dsi_backlight;
-
- int width_mm;
- int height_mm;
-
- struct omap_dsi_pin_config pin_config;
-
- /* runtime variables */
- bool enabled;
-
- bool te_enabled;
-
- atomic_t do_update;
- int channel;
-
- struct delayed_work te_timeout_work;
-
- bool intro_printed;
-
- struct workqueue_struct *workqueue;
-
- bool ulps_enabled;
- unsigned int ulps_timeout;
- struct delayed_work ulps_work;
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static irqreturn_t dsicm_te_isr(int irq, void *data);
-static void dsicm_te_timeout_work_callback(struct work_struct *work);
-static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable);
-
-static int dsicm_panel_reset(struct panel_drv_data *ddata);
-
-static void dsicm_ulps_work(struct work_struct *work);
-
-static void dsicm_bl_power(struct panel_drv_data *ddata, bool enable)
-{
- struct backlight_device *backlight;
-
- if (ddata->bldev)
- backlight = ddata->bldev;
- else if (ddata->extbldev)
- backlight = ddata->extbldev;
- else
- return;
-
- if (enable) {
- backlight->props.fb_blank = FB_BLANK_UNBLANK;
- backlight->props.state = ~(BL_CORE_FBBLANK | BL_CORE_SUSPENDED);
- backlight->props.power = FB_BLANK_UNBLANK;
- } else {
- backlight->props.fb_blank = FB_BLANK_NORMAL;
- backlight->props.power = FB_BLANK_POWERDOWN;
- backlight->props.state |= BL_CORE_FBBLANK | BL_CORE_SUSPENDED;
- }
-
- backlight_update_status(backlight);
-}
-
-static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
-{
- ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
- ddata->hw_guard_end = jiffies + ddata->hw_guard_wait;
-}
-
-static void hw_guard_wait(struct panel_drv_data *ddata)
-{
- unsigned long wait = ddata->hw_guard_end - jiffies;
-
- if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(wait);
- }
-}
-
-static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
- u8 buf[1];
-
- r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd, buf, 1);
-
- if (r < 0)
- return r;
-
- *data = buf[0];
-
- return 0;
-}
-
-static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd)
-{
- struct omap_dss_device *src = ddata->src;
-
- return src->ops->dsi.dcs_write(src, ddata->channel, &dcs_cmd, 1);
-}
-
-static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
-{
- struct omap_dss_device *src = ddata->src;
- u8 buf[2] = { dcs_cmd, param };
-
- return src->ops->dsi.dcs_write(src, ddata->channel, buf, 2);
-}
-
-static int dsicm_sleep_in(struct panel_drv_data *ddata)
-
-{
- struct omap_dss_device *src = ddata->src;
- u8 cmd;
- int r;
-
- hw_guard_wait(ddata);
-
- cmd = MIPI_DCS_ENTER_SLEEP_MODE;
- r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, &cmd, 1);
- if (r)
- return r;
-
- hw_guard_start(ddata, 120);
-
- usleep_range(5000, 10000);
-
- return 0;
-}
-
-static int dsicm_sleep_out(struct panel_drv_data *ddata)
-{
- int r;
-
- hw_guard_wait(ddata);
-
- r = dsicm_dcs_write_0(ddata, MIPI_DCS_EXIT_SLEEP_MODE);
- if (r)
- return r;
-
- hw_guard_start(ddata, 120);
-
- usleep_range(5000, 10000);
-
- return 0;
-}
-
-static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
-{
- int r;
-
- r = dsicm_dcs_read_1(ddata, DCS_GET_ID1, id1);
- if (r)
- return r;
- r = dsicm_dcs_read_1(ddata, DCS_GET_ID2, id2);
- if (r)
- return r;
- r = dsicm_dcs_read_1(ddata, DCS_GET_ID3, id3);
- if (r)
- return r;
-
- return 0;
-}
-
-static int dsicm_set_update_window(struct panel_drv_data *ddata,
- u16 x, u16 y, u16 w, u16 h)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
- u16 x1 = x;
- u16 x2 = x + w - 1;
- u16 y1 = y;
- u16 y2 = y + h - 1;
-
- u8 buf[5];
- buf[0] = MIPI_DCS_SET_COLUMN_ADDRESS;
- buf[1] = (x1 >> 8) & 0xff;
- buf[2] = (x1 >> 0) & 0xff;
- buf[3] = (x2 >> 8) & 0xff;
- buf[4] = (x2 >> 0) & 0xff;
-
- r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf));
- if (r)
- return r;
-
- buf[0] = MIPI_DCS_SET_PAGE_ADDRESS;
- buf[1] = (y1 >> 8) & 0xff;
- buf[2] = (y1 >> 0) & 0xff;
- buf[3] = (y2 >> 8) & 0xff;
- buf[4] = (y2 >> 0) & 0xff;
-
- r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf));
- if (r)
- return r;
-
- src->ops->dsi.bta_sync(src, ddata->channel);
-
- return r;
-}
-
-static void dsicm_queue_ulps_work(struct panel_drv_data *ddata)
-{
- if (ddata->ulps_timeout > 0)
- queue_delayed_work(ddata->workqueue, &ddata->ulps_work,
- msecs_to_jiffies(ddata->ulps_timeout));
-}
-
-static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata)
-{
- cancel_delayed_work(&ddata->ulps_work);
-}
-
-static int dsicm_enter_ulps(struct panel_drv_data *ddata)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
-
- if (ddata->ulps_enabled)
- return 0;
-
- dsicm_cancel_ulps_work(ddata);
-
- r = _dsicm_enable_te(ddata, false);
- if (r)
- goto err;
-
- if (ddata->ext_te_gpio)
- disable_irq(gpiod_to_irq(ddata->ext_te_gpio));
-
- src->ops->dsi.disable(src, false, true);
-
- ddata->ulps_enabled = true;
-
- return 0;
-
-err:
- dev_err(&ddata->pdev->dev, "enter ULPS failed");
- dsicm_panel_reset(ddata);
-
- ddata->ulps_enabled = false;
-
- dsicm_queue_ulps_work(ddata);
-
- return r;
-}
-
-static int dsicm_exit_ulps(struct panel_drv_data *ddata)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
-
- if (!ddata->ulps_enabled)
- return 0;
-
- src->ops->enable(src);
- src->ops->dsi.enable_hs(src, ddata->channel, true);
-
- r = _dsicm_enable_te(ddata, true);
- if (r) {
- dev_err(&ddata->pdev->dev, "failed to re-enable TE");
- goto err2;
- }
-
- if (ddata->ext_te_gpio)
- enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
-
- dsicm_queue_ulps_work(ddata);
-
- ddata->ulps_enabled = false;
-
- return 0;
-
-err2:
- dev_err(&ddata->pdev->dev, "failed to exit ULPS");
-
- r = dsicm_panel_reset(ddata);
- if (!r) {
- if (ddata->ext_te_gpio)
- enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
- ddata->ulps_enabled = false;
- }
-
- dsicm_queue_ulps_work(ddata);
-
- return r;
-}
-
-static int dsicm_wake_up(struct panel_drv_data *ddata)
-{
- if (ddata->ulps_enabled)
- return dsicm_exit_ulps(ddata);
-
- dsicm_cancel_ulps_work(ddata);
- dsicm_queue_ulps_work(ddata);
- return 0;
-}
-
-static int dsicm_bl_update_status(struct backlight_device *dev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- struct omap_dss_device *src = ddata->src;
- int r = 0;
- int level;
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- level = dev->props.brightness;
- else
- level = 0;
-
- dev_dbg(&ddata->pdev->dev, "update brightness to %d\n", level);
-
- mutex_lock(&ddata->lock);
-
- if (ddata->enabled) {
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (!r)
- r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, level);
-
- src->ops->dsi.bus_unlock(src);
- }
-
- mutex_unlock(&ddata->lock);
-
- return r;
-}
-
-static int dsicm_bl_get_intensity(struct backlight_device *dev)
-{
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- return dev->props.brightness;
-
- return 0;
-}
-
-static const struct backlight_ops dsicm_bl_ops = {
- .get_brightness = dsicm_bl_get_intensity,
- .update_status = dsicm_bl_update_status,
-};
-
-static ssize_t dsicm_num_errors_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- struct omap_dss_device *src = ddata->src;
- u8 errors = 0;
- int r;
-
- mutex_lock(&ddata->lock);
-
- if (ddata->enabled) {
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (!r)
- r = dsicm_dcs_read_1(ddata, DCS_READ_NUM_ERRORS,
- &errors);
-
- src->ops->dsi.bus_unlock(src);
- } else {
- r = -ENODEV;
- }
-
- mutex_unlock(&ddata->lock);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", errors);
-}
-
-static ssize_t dsicm_hw_revision_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- struct omap_dss_device *src = ddata->src;
- u8 id1, id2, id3;
- int r;
-
- mutex_lock(&ddata->lock);
-
- if (ddata->enabled) {
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (!r)
- r = dsicm_get_id(ddata, &id1, &id2, &id3);
-
- src->ops->dsi.bus_unlock(src);
- } else {
- r = -ENODEV;
- }
-
- mutex_unlock(&ddata->lock);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
-}
-
-static ssize_t dsicm_store_ulps(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- struct omap_dss_device *src = ddata->src;
- unsigned long t;
- int r;
-
- r = kstrtoul(buf, 0, &t);
- if (r)
- return r;
-
- mutex_lock(&ddata->lock);
-
- if (ddata->enabled) {
- src->ops->dsi.bus_lock(src);
-
- if (t)
- r = dsicm_enter_ulps(ddata);
- else
- r = dsicm_wake_up(ddata);
-
- src->ops->dsi.bus_unlock(src);
- }
-
- mutex_unlock(&ddata->lock);
-
- if (r)
- return r;
-
- return count;
-}
-
-static ssize_t dsicm_show_ulps(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- unsigned int t;
-
- mutex_lock(&ddata->lock);
- t = ddata->ulps_enabled;
- mutex_unlock(&ddata->lock);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", t);
-}
-
-static ssize_t dsicm_store_ulps_timeout(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- struct omap_dss_device *src = ddata->src;
- unsigned long t;
- int r;
-
- r = kstrtoul(buf, 0, &t);
- if (r)
- return r;
-
- mutex_lock(&ddata->lock);
- ddata->ulps_timeout = t;
-
- if (ddata->enabled) {
- /* dsicm_wake_up will restart the timer */
- src->ops->dsi.bus_lock(src);
- r = dsicm_wake_up(ddata);
- src->ops->dsi.bus_unlock(src);
- }
-
- mutex_unlock(&ddata->lock);
-
- if (r)
- return r;
-
- return count;
-}
-
-static ssize_t dsicm_show_ulps_timeout(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- unsigned int t;
-
- mutex_lock(&ddata->lock);
- t = ddata->ulps_timeout;
- mutex_unlock(&ddata->lock);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", t);
-}
-
-static DEVICE_ATTR(num_dsi_errors, S_IRUGO, dsicm_num_errors_show, NULL);
-static DEVICE_ATTR(hw_revision, S_IRUGO, dsicm_hw_revision_show, NULL);
-static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR,
- dsicm_show_ulps, dsicm_store_ulps);
-static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR,
- dsicm_show_ulps_timeout, dsicm_store_ulps_timeout);
-
-static struct attribute *dsicm_attrs[] = {
- &dev_attr_num_dsi_errors.attr,
- &dev_attr_hw_revision.attr,
- &dev_attr_ulps.attr,
- &dev_attr_ulps_timeout.attr,
- NULL,
-};
-
-static const struct attribute_group dsicm_attr_group = {
- .attrs = dsicm_attrs,
-};
-
-static void dsicm_hw_reset(struct panel_drv_data *ddata)
-{
- gpiod_set_value(ddata->reset_gpio, 1);
- udelay(10);
- /* reset the panel */
- gpiod_set_value(ddata->reset_gpio, 0);
- /* assert reset */
- udelay(10);
- gpiod_set_value(ddata->reset_gpio, 1);
- /* wait after releasing reset */
- usleep_range(5000, 10000);
-}
-
-static int dsicm_power_on(struct panel_drv_data *ddata)
-{
- struct omap_dss_device *src = ddata->src;
- u8 id1, id2, id3;
- int r;
- struct omap_dss_dsi_config dsi_config = {
- .mode = OMAP_DSS_DSI_CMD_MODE,
- .pixel_format = OMAP_DSS_DSI_FMT_RGB888,
- .vm = &ddata->vm,
- .hs_clk_min = 150000000,
- .hs_clk_max = 300000000,
- .lp_clk_min = 7000000,
- .lp_clk_max = 10000000,
- };
-
- if (ddata->vpnl) {
- r = regulator_enable(ddata->vpnl);
- if (r) {
- dev_err(&ddata->pdev->dev,
- "failed to enable VPNL: %d\n", r);
- return r;
- }
- }
-
- if (ddata->vddi) {
- r = regulator_enable(ddata->vddi);
- if (r) {
- dev_err(&ddata->pdev->dev,
- "failed to enable VDDI: %d\n", r);
- goto err_vpnl;
- }
- }
-
- if (ddata->pin_config.num_pins > 0) {
- r = src->ops->dsi.configure_pins(src, &ddata->pin_config);
- if (r) {
- dev_err(&ddata->pdev->dev,
- "failed to configure DSI pins\n");
- goto err_vddi;
- }
- }
-
- r = src->ops->dsi.set_config(src, &dsi_config);
- if (r) {
- dev_err(&ddata->pdev->dev, "failed to configure DSI\n");
- goto err_vddi;
- }
-
- src->ops->enable(src);
-
- dsicm_hw_reset(ddata);
-
- src->ops->dsi.enable_hs(src, ddata->channel, false);
-
- r = dsicm_sleep_out(ddata);
- if (r)
- goto err;
-
- r = dsicm_get_id(ddata, &id1, &id2, &id3);
- if (r)
- goto err;
-
- r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, 0xff);
- if (r)
- goto err;
-
- r = dsicm_dcs_write_1(ddata, DCS_CTRL_DISPLAY,
- (1<<2) | (1<<5)); /* BL | BCTRL */
- if (r)
- goto err;
-
- r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_PIXEL_FORMAT,
- MIPI_DCS_PIXEL_FMT_24BIT);
- if (r)
- goto err;
-
- r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_ON);
- if (r)
- goto err;
-
- r = _dsicm_enable_te(ddata, ddata->te_enabled);
- if (r)
- goto err;
-
- r = src->ops->dsi.enable_video_output(src, ddata->channel);
- if (r)
- goto err;
-
- ddata->enabled = true;
-
- if (!ddata->intro_printed) {
- dev_info(&ddata->pdev->dev, "panel revision %02x.%02x.%02x\n",
- id1, id2, id3);
- ddata->intro_printed = true;
- }
-
- src->ops->dsi.enable_hs(src, ddata->channel, true);
-
- return 0;
-err:
- dev_err(&ddata->pdev->dev, "error while enabling panel, issuing HW reset\n");
-
- dsicm_hw_reset(ddata);
-
- src->ops->dsi.disable(src, true, false);
-err_vddi:
- if (ddata->vddi)
- regulator_disable(ddata->vddi);
-err_vpnl:
- if (ddata->vpnl)
- regulator_disable(ddata->vpnl);
-
- return r;
-}
-
-static void dsicm_power_off(struct panel_drv_data *ddata)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
-
- src->ops->dsi.disable_video_output(src, ddata->channel);
-
- r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_OFF);
- if (!r)
- r = dsicm_sleep_in(ddata);
-
- if (r) {
- dev_err(&ddata->pdev->dev,
- "error disabling panel, issuing HW reset\n");
- dsicm_hw_reset(ddata);
- }
-
- src->ops->dsi.disable(src, true, false);
-
- if (ddata->vddi)
- regulator_disable(ddata->vddi);
- if (ddata->vpnl)
- regulator_disable(ddata->vpnl);
-
- ddata->enabled = false;
-}
-
-static int dsicm_panel_reset(struct panel_drv_data *ddata)
-{
- dev_err(&ddata->pdev->dev, "performing LCD reset\n");
-
- dsicm_power_off(ddata);
- dsicm_hw_reset(ddata);
- return dsicm_power_on(ddata);
-}
-
-static int dsicm_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
- struct device *dev = &ddata->pdev->dev;
- int r;
-
- r = src->ops->dsi.request_vc(src, &ddata->channel);
- if (r) {
- dev_err(dev, "failed to get virtual channel\n");
- return r;
- }
-
- r = src->ops->dsi.set_vc_id(src, ddata->channel, TCH);
- if (r) {
- dev_err(dev, "failed to set VC_ID\n");
- src->ops->dsi.release_vc(src, ddata->channel);
- return r;
- }
-
- ddata->src = src;
- return 0;
-}
-
-static void dsicm_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
-
- src->ops->dsi.release_vc(src, ddata->channel);
- ddata->src = NULL;
-}
-
-static void dsicm_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
- int r;
-
- mutex_lock(&ddata->lock);
-
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_power_on(ddata);
-
- src->ops->dsi.bus_unlock(src);
-
- if (r)
- goto err;
-
- mutex_unlock(&ddata->lock);
-
- dsicm_bl_power(ddata, true);
-
- return;
-err:
- dev_dbg(&ddata->pdev->dev, "enable failed (%d)\n", r);
- mutex_unlock(&ddata->lock);
-}
-
-static void dsicm_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
- int r;
-
- dsicm_bl_power(ddata, false);
-
- mutex_lock(&ddata->lock);
-
- dsicm_cancel_ulps_work(ddata);
-
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (!r)
- dsicm_power_off(ddata);
-
- src->ops->dsi.bus_unlock(src);
-
- mutex_unlock(&ddata->lock);
-}
-
-static void dsicm_framedone_cb(int err, void *data)
-{
- struct panel_drv_data *ddata = data;
- struct omap_dss_device *src = ddata->src;
-
- dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err);
- src->ops->dsi.bus_unlock(src);
-}
-
-static irqreturn_t dsicm_te_isr(int irq, void *data)
-{
- struct panel_drv_data *ddata = data;
- struct omap_dss_device *src = ddata->src;
- int old;
- int r;
-
- old = atomic_cmpxchg(&ddata->do_update, 1, 0);
-
- if (old) {
- cancel_delayed_work(&ddata->te_timeout_work);
-
- r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb,
- ddata);
- if (r)
- goto err;
- }
-
- return IRQ_HANDLED;
-err:
- dev_err(&ddata->pdev->dev, "start update failed\n");
- src->ops->dsi.bus_unlock(src);
- return IRQ_HANDLED;
-}
-
-static void dsicm_te_timeout_work_callback(struct work_struct *work)
-{
- struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
- te_timeout_work.work);
- struct omap_dss_device *src = ddata->src;
-
- dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n");
-
- atomic_set(&ddata->do_update, 0);
- src->ops->dsi.bus_unlock(src);
-}
-
-static int dsicm_update(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
- int r;
-
- dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
-
- mutex_lock(&ddata->lock);
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (r)
- goto err;
-
- if (!ddata->enabled) {
- r = 0;
- goto err;
- }
-
- /* XXX no need to send this every frame, but dsi break if not done */
- r = dsicm_set_update_window(ddata, 0, 0, ddata->vm.hactive,
- ddata->vm.vactive);
- if (r)
- goto err;
-
- if (ddata->te_enabled && ddata->ext_te_gpio) {
- schedule_delayed_work(&ddata->te_timeout_work,
- msecs_to_jiffies(250));
- atomic_set(&ddata->do_update, 1);
- } else {
- r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb,
- ddata);
- if (r)
- goto err;
- }
-
- /* note: no bus_unlock here. unlock is src framedone_cb */
- mutex_unlock(&ddata->lock);
- return 0;
-err:
- src->ops->dsi.bus_unlock(src);
- mutex_unlock(&ddata->lock);
- return r;
-}
-
-static int dsicm_sync(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
-
- dev_dbg(&ddata->pdev->dev, "sync\n");
-
- mutex_lock(&ddata->lock);
- src->ops->dsi.bus_lock(src);
- src->ops->dsi.bus_unlock(src);
- mutex_unlock(&ddata->lock);
-
- dev_dbg(&ddata->pdev->dev, "sync done\n");
-
- return 0;
-}
-
-static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
-
- if (enable)
- r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_TEAR_ON, 0);
- else
- r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_TEAR_OFF);
-
- if (!ddata->ext_te_gpio)
- src->ops->dsi.enable_te(src, enable);
-
- /* possible panel bug */
- msleep(100);
-
- return r;
-}
-
-static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
- int r;
-
- mutex_lock(&ddata->lock);
-
- if (ddata->te_enabled == enable)
- goto end;
-
- src->ops->dsi.bus_lock(src);
-
- if (ddata->enabled) {
- r = dsicm_wake_up(ddata);
- if (r)
- goto err;
-
- r = _dsicm_enable_te(ddata, enable);
- if (r)
- goto err;
- }
-
- ddata->te_enabled = enable;
-
- src->ops->dsi.bus_unlock(src);
-end:
- mutex_unlock(&ddata->lock);
-
- return 0;
-err:
- src->ops->dsi.bus_unlock(src);
- mutex_unlock(&ddata->lock);
-
- return r;
-}
-
-static int dsicm_get_te(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- int r;
-
- mutex_lock(&ddata->lock);
- r = ddata->te_enabled;
- mutex_unlock(&ddata->lock);
-
- return r;
-}
-
-static int dsicm_memory_read(struct omap_dss_device *dssdev,
- void *buf, size_t size,
- u16 x, u16 y, u16 w, u16 h)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
- int r;
- int first = 1;
- int plen;
- unsigned int buf_used = 0;
-
- if (size < w * h * 3)
- return -ENOMEM;
-
- mutex_lock(&ddata->lock);
-
- if (!ddata->enabled) {
- r = -ENODEV;
- goto err1;
- }
-
- size = min((u32)w * h * 3,
- ddata->vm.hactive * ddata->vm.vactive * 3);
-
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (r)
- goto err2;
-
- /* plen 1 or 2 goes into short packet. until checksum error is fixed,
- * use short packets. plen 32 works, but bigger packets seem to cause
- * an error. */
- if (size % 2)
- plen = 1;
- else
- plen = 2;
-
- dsicm_set_update_window(ddata, x, y, w, h);
-
- r = src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, plen);
- if (r)
- goto err2;
-
- while (buf_used < size) {
- u8 dcs_cmd = first ? 0x2e : 0x3e;
- first = 0;
-
- r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd,
- buf + buf_used, size - buf_used);
-
- if (r < 0) {
- dev_err(dssdev->dev, "read error\n");
- goto err3;
- }
-
- buf_used += r;
-
- if (r < plen) {
- dev_err(&ddata->pdev->dev, "short read\n");
- break;
- }
-
- if (signal_pending(current)) {
- dev_err(&ddata->pdev->dev, "signal pending, "
- "aborting memory read\n");
- r = -ERESTARTSYS;
- goto err3;
- }
- }
-
- r = buf_used;
-
-err3:
- src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, 1);
-err2:
- src->ops->dsi.bus_unlock(src);
-err1:
- mutex_unlock(&ddata->lock);
- return r;
-}
-
-static void dsicm_ulps_work(struct work_struct *work)
-{
- struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
- ulps_work.work);
- struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *src = ddata->src;
-
- mutex_lock(&ddata->lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !ddata->enabled) {
- mutex_unlock(&ddata->lock);
- return;
- }
-
- src->ops->dsi.bus_lock(src);
-
- dsicm_enter_ulps(ddata);
-
- src->ops->dsi.bus_unlock(src);
- mutex_unlock(&ddata->lock);
-}
-
-static int dsicm_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- connector->display_info.width_mm = ddata->width_mm;
- connector->display_info.height_mm = ddata->height_mm;
-
- return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static int dsicm_check_timings(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- int ret = 0;
-
- if (mode->hdisplay != ddata->vm.hactive)
- ret = -EINVAL;
-
- if (mode->vdisplay != ddata->vm.vactive)
- ret = -EINVAL;
-
- if (ret) {
- dev_warn(dssdev->dev, "wrong resolution: %d x %d",
- mode->hdisplay, mode->vdisplay);
- dev_warn(dssdev->dev, "panel resolution: %d x %d",
- ddata->vm.hactive, ddata->vm.vactive);
- }
-
- return ret;
-}
-
-static const struct omap_dss_device_ops dsicm_ops = {
- .connect = dsicm_connect,
- .disconnect = dsicm_disconnect,
-
- .enable = dsicm_enable,
- .disable = dsicm_disable,
-
- .get_modes = dsicm_get_modes,
- .check_timings = dsicm_check_timings,
-};
-
-static const struct omap_dss_driver dsicm_dss_driver = {
- .update = dsicm_update,
- .sync = dsicm_sync,
-
- .enable_te = dsicm_enable_te,
- .get_te = dsicm_get_te,
-
- .memory_read = dsicm_memory_read,
-};
-
-static int dsicm_probe_of(struct platform_device *pdev)
-{
- struct device_node *node = pdev->dev.of_node;
- struct backlight_device *backlight;
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct display_timing timing;
- int err;
-
- ddata->reset_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(ddata->reset_gpio)) {
- err = PTR_ERR(ddata->reset_gpio);
- dev_err(&pdev->dev, "reset gpio request failed: %d", err);
- return err;
- }
-
- ddata->ext_te_gpio = devm_gpiod_get_optional(&pdev->dev, "te",
- GPIOD_IN);
- if (IS_ERR(ddata->ext_te_gpio)) {
- err = PTR_ERR(ddata->ext_te_gpio);
- dev_err(&pdev->dev, "TE gpio request failed: %d", err);
- return err;
- }
-
- err = of_get_display_timing(node, "panel-timing", &timing);
- if (!err) {
- videomode_from_timing(&timing, &ddata->vm);
- if (!ddata->vm.pixelclock)
- ddata->vm.pixelclock =
- ddata->vm.hactive * ddata->vm.vactive * 60;
- } else {
- dev_warn(&pdev->dev,
- "failed to get video timing, using defaults\n");
- }
-
- ddata->width_mm = 0;
- of_property_read_u32(node, "width-mm", &ddata->width_mm);
-
- ddata->height_mm = 0;
- of_property_read_u32(node, "height-mm", &ddata->height_mm);
-
- ddata->vpnl = devm_regulator_get_optional(&pdev->dev, "vpnl");
- if (IS_ERR(ddata->vpnl)) {
- err = PTR_ERR(ddata->vpnl);
- if (err == -EPROBE_DEFER)
- return err;
- ddata->vpnl = NULL;
- }
-
- ddata->vddi = devm_regulator_get_optional(&pdev->dev, "vddi");
- if (IS_ERR(ddata->vddi)) {
- err = PTR_ERR(ddata->vddi);
- if (err == -EPROBE_DEFER)
- return err;
- ddata->vddi = NULL;
- }
-
- backlight = devm_of_find_backlight(&pdev->dev);
- if (IS_ERR(backlight))
- return PTR_ERR(backlight);
-
- /* If no backlight device is found assume native backlight support */
- if (backlight)
- ddata->extbldev = backlight;
- else
- ddata->use_dsi_backlight = true;
-
- /* TODO: ulps */
-
- return 0;
-}
-
-static int dsicm_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct backlight_device *bldev = NULL;
- struct device *dev = &pdev->dev;
- struct omap_dss_device *dssdev;
- int r;
-
- dev_dbg(dev, "probe\n");
-
- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
- ddata->pdev = pdev;
-
- ddata->vm.hactive = 864;
- ddata->vm.vactive = 480;
- ddata->vm.pixelclock = 864 * 480 * 60;
-
- r = dsicm_probe_of(pdev);
- if (r)
- return r;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = dev;
- dssdev->ops = &dsicm_ops;
- dssdev->driver = &dsicm_dss_driver;
- dssdev->type = OMAP_DISPLAY_TYPE_DSI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_port = 0;
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
-
- dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
- OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- mutex_init(&ddata->lock);
-
- atomic_set(&ddata->do_update, 0);
-
- if (ddata->ext_te_gpio) {
- r = devm_request_irq(dev, gpiod_to_irq(ddata->ext_te_gpio),
- dsicm_te_isr,
- IRQF_TRIGGER_RISING,
- "taal vsync", ddata);
-
- if (r) {
- dev_err(dev, "IRQ request failed\n");
- goto err_reg;
- }
-
- INIT_DEFERRABLE_WORK(&ddata->te_timeout_work,
- dsicm_te_timeout_work_callback);
-
- dev_dbg(dev, "Using GPIO TE\n");
- }
-
- ddata->workqueue = create_singlethread_workqueue("dsicm_wq");
- if (!ddata->workqueue) {
- r = -ENOMEM;
- goto err_reg;
- }
- INIT_DELAYED_WORK(&ddata->ulps_work, dsicm_ulps_work);
-
- dsicm_hw_reset(ddata);
-
- if (ddata->use_dsi_backlight) {
- struct backlight_properties props = { 0 };
- props.max_brightness = 255;
- props.type = BACKLIGHT_RAW;
-
- bldev = devm_backlight_device_register(dev, dev_name(dev),
- dev, ddata, &dsicm_bl_ops, &props);
- if (IS_ERR(bldev)) {
- r = PTR_ERR(bldev);
- goto err_bl;
- }
-
- ddata->bldev = bldev;
- }
-
- r = sysfs_create_group(&dev->kobj, &dsicm_attr_group);
- if (r) {
- dev_err(dev, "failed to create sysfs files\n");
- goto err_bl;
- }
-
- return 0;
-
-err_bl:
- destroy_workqueue(ddata->workqueue);
-err_reg:
- if (ddata->extbldev)
- put_device(&ddata->extbldev->dev);
-
- return r;
-}
-
-static int __exit dsicm_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- dev_dbg(&pdev->dev, "remove\n");
-
- omapdss_device_unregister(dssdev);
-
- if (omapdss_device_is_enabled(dssdev))
- dsicm_disable(dssdev);
- omapdss_device_disconnect(ddata->src, dssdev);
-
- sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group);
-
- if (ddata->extbldev)
- put_device(&ddata->extbldev->dev);
-
- dsicm_cancel_ulps_work(ddata);
- destroy_workqueue(ddata->workqueue);
-
- /* reset, to be sure that the panel is in a valid state */
- dsicm_hw_reset(ddata);
-
- return 0;
-}
-
-static const struct of_device_id dsicm_of_match[] = {
- { .compatible = "omapdss,panel-dsi-cm", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, dsicm_of_match);
-
-static struct platform_driver dsicm_driver = {
- .probe = dsicm_probe,
- .remove = __exit_p(dsicm_remove),
- .driver = {
- .name = "panel-dsi-cm",
- .of_match_table = dsicm_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(dsicm_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
-MODULE_DESCRIPTION("Generic DSI Command Mode Panel Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig
deleted file mode 100644
index e11b258a2294..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/Kconfig
+++ /dev/null
@@ -1,135 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config OMAP2_DSS_INIT
- bool
-
-config OMAP_DSS_BASE
- tristate
-
-menuconfig OMAP2_DSS
- tristate "OMAP2+ Display Subsystem support"
- select OMAP_DSS_BASE
- select VIDEOMODE_HELPERS
- select OMAP2_DSS_INIT
- select HDMI
- help
- OMAP2+ Display Subsystem support.
-
-if OMAP2_DSS
-
-config OMAP2_DSS_DEBUG
- bool "Debug support"
- default n
- help
- This enables printing of debug messages. Alternatively, debug messages
- can also be enabled by setting CONFIG_DYNAMIC_DEBUG and then setting
- appropriate flags in <debugfs>/dynamic_debug/control.
-
-config OMAP2_DSS_DEBUGFS
- bool "Debugfs filesystem support"
- depends on DEBUG_FS
- default n
- help
- This enables debugfs for OMAPDSS at <debugfs>/omapdss. This enables
- querying about clock configuration and register configuration of dss,
- dispc, dsi, hdmi and rfbi.
-
-config OMAP2_DSS_COLLECT_IRQ_STATS
- bool "Collect DSS IRQ statistics"
- depends on OMAP2_DSS_DEBUGFS
- default n
- help
- Collect DSS IRQ statistics, printable via debugfs.
-
- The statistics can be found from
- <debugfs>/omapdss/dispc_irq for DISPC interrupts, and
- <debugfs>/omapdss/dsi_irq for DSI interrupts.
-
-config OMAP2_DSS_DPI
- bool "DPI support"
- default y
- help
- DPI Interface. This is the Parallel Display Interface.
-
-config OMAP2_DSS_VENC
- bool "VENC support"
- default y
- help
- OMAP Video Encoder support for S-Video and composite TV-out.
-
-config OMAP2_DSS_HDMI_COMMON
- bool
-
-config OMAP4_DSS_HDMI
- bool "HDMI support for OMAP4"
- default y
- select OMAP2_DSS_HDMI_COMMON
- help
- HDMI support for OMAP4 based SoCs.
-
-config OMAP4_DSS_HDMI_CEC
- bool "Enable HDMI CEC support for OMAP4"
- depends on OMAP4_DSS_HDMI
- select CEC_CORE
- default y
- help
- When selected the HDMI transmitter will support the CEC feature.
-
-config OMAP5_DSS_HDMI
- bool "HDMI support for OMAP5"
- default n
- select OMAP2_DSS_HDMI_COMMON
- help
- HDMI Interface for OMAP5 and similar cores. This adds the High
- Definition Multimedia Interface. See https://www.hdmi.org/ for HDMI
- specification.
-
-config OMAP2_DSS_SDI
- bool "SDI support"
- default n
- help
- SDI (Serial Display Interface) support.
-
- SDI is a high speed one-way display serial bus between the host
- processor and a display.
-
-config OMAP2_DSS_DSI
- bool "DSI support"
- default n
- help
- MIPI DSI (Display Serial Interface) support.
-
- DSI is a high speed half-duplex serial interface between the host
- processor and a peripheral, such as a display or a framebuffer chip.
-
- See https://www.mipi.org/ for DSI specifications.
-
-config OMAP2_DSS_MIN_FCK_PER_PCK
- int "Minimum FCK/PCK ratio (for scaling)"
- range 0 32
- default 0
- help
- This can be used to adjust the minimum FCK/PCK ratio.
-
- With this you can make sure that DISPC FCK is at least
- n x PCK. Video plane scaling requires higher FCK than
- normally.
-
- If this is set to 0, there's no extra constraint on the
- DISPC FCK. However, the FCK will at minimum be
- 2xPCK (if active matrix) or 3xPCK (if passive matrix).
-
- Max FCK is 173MHz, so this doesn't work if your PCK
- is very high.
-
-config OMAP2_DSS_SLEEP_AFTER_VENC_RESET
- bool "Sleep 20ms after VENC reset"
- default y
- help
- There is a 20ms sleep after VENC reset which seemed to fix the
- reset. The reason for the bug is unclear, and it's also unclear
- on what platforms this happens.
-
- This option enables the sleep, and is enabled by default. You can
- disable the sleep if it doesn't cause problems on your platform.
-
-endif
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
deleted file mode 100644
index f967e6948f2e..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o
-
-obj-$(CONFIG_OMAP_DSS_BASE) += omapdss-base.o
-omapdss-base-y := base.o display.o output.o
-
-obj-$(CONFIG_OMAP2_DSS) += omapdss.o
-# Core DSS files
-omapdss-y := dss.o dispc.o dispc_coefs.o \
- pll.o video-pll.o
-omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
-omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
-omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
-omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
-omapdss-$(CONFIG_OMAP2_DSS_HDMI_COMMON) += hdmi_common.o hdmi_wp.o hdmi_pll.o \
- hdmi_phy.o
-omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi4.o hdmi4_core.o
-omapdss-$(CONFIG_OMAP4_DSS_HDMI_CEC) += hdmi4_cec.o
-omapdss-$(CONFIG_OMAP5_DSS_HDMI) += hdmi5.o hdmi5_core.o
-ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index cf50430e6363..050ca7eafac5 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -16,32 +16,10 @@
#include "dss.h"
#include "omapdss.h"
-static struct dss_device *dss_device;
-
-struct dss_device *omapdss_get_dss(void)
-{
- return dss_device;
-}
-EXPORT_SYMBOL(omapdss_get_dss);
-
-void omapdss_set_dss(struct dss_device *dss)
-{
- dss_device = dss;
-}
-EXPORT_SYMBOL(omapdss_set_dss);
-
struct dispc_device *dispc_get_dispc(struct dss_device *dss)
{
return dss->dispc;
}
-EXPORT_SYMBOL(dispc_get_dispc);
-
-const struct dispc_ops *dispc_get_ops(struct dss_device *dss)
-{
- return dss->dispc_ops;
-}
-EXPORT_SYMBOL(dispc_get_ops);
-
/* -----------------------------------------------------------------------------
* OMAP DSS Devices Handling
@@ -56,7 +34,6 @@ void omapdss_device_register(struct omap_dss_device *dssdev)
list_add_tail(&dssdev->list, &omapdss_devices_list);
mutex_unlock(&omapdss_devices_lock);
}
-EXPORT_SYMBOL_GPL(omapdss_device_register);
void omapdss_device_unregister(struct omap_dss_device *dssdev)
{
@@ -64,7 +41,6 @@ void omapdss_device_unregister(struct omap_dss_device *dssdev)
list_del(&dssdev->list);
mutex_unlock(&omapdss_devices_lock);
}
-EXPORT_SYMBOL_GPL(omapdss_device_unregister);
static bool omapdss_device_is_registered(struct device_node *node)
{
@@ -86,24 +62,16 @@ static bool omapdss_device_is_registered(struct device_node *node)
struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev)
{
- if (!try_module_get(dssdev->owner))
- return NULL;
-
- if (get_device(dssdev->dev) == NULL) {
- module_put(dssdev->owner);
+ if (get_device(dssdev->dev) == NULL)
return NULL;
- }
return dssdev;
}
-EXPORT_SYMBOL(omapdss_device_get);
void omapdss_device_put(struct omap_dss_device *dssdev)
{
put_device(dssdev->dev);
- module_put(dssdev->owner);
}
-EXPORT_SYMBOL(omapdss_device_put);
struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node)
{
@@ -149,7 +117,7 @@ struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from)
goto done;
}
- if (dssdev->id && (dssdev->next || dssdev->bridge))
+ if (dssdev->id && dssdev->bridge)
goto done;
}
@@ -164,7 +132,6 @@ done:
mutex_unlock(&omapdss_devices_lock);
return dssdev;
}
-EXPORT_SYMBOL(omapdss_device_next_output);
static bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
{
@@ -175,8 +142,6 @@ int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int ret;
-
dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n",
src ? dev_name(src->dev) : "NULL",
dst ? dev_name(dst->dev) : "NULL");
@@ -195,17 +160,8 @@ int omapdss_device_connect(struct dss_device *dss,
dst->dss = dss;
- if (dst->ops && dst->ops->connect) {
- ret = dst->ops->connect(src, dst);
- if (ret < 0) {
- dst->dss = NULL;
- return ret;
- }
- }
-
return 0;
}
-EXPORT_SYMBOL_GPL(omapdss_device_connect);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
@@ -222,43 +178,12 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
}
if (!dst->id && !omapdss_device_is_connected(dst)) {
- WARN_ON(!dst->display);
+ WARN_ON(1);
return;
}
- WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED);
-
- if (dst->ops && dst->ops->disconnect)
- dst->ops->disconnect(src, dst);
dst->dss = NULL;
}
-EXPORT_SYMBOL_GPL(omapdss_device_disconnect);
-
-void omapdss_device_enable(struct omap_dss_device *dssdev)
-{
- if (!dssdev)
- return;
-
- if (dssdev->ops && dssdev->ops->enable)
- dssdev->ops->enable(dssdev);
-
- omapdss_device_enable(dssdev->next);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-}
-EXPORT_SYMBOL_GPL(omapdss_device_enable);
-
-void omapdss_device_disable(struct omap_dss_device *dssdev)
-{
- if (!dssdev)
- return;
-
- omapdss_device_disable(dssdev->next);
-
- if (dssdev->ops && dssdev->ops->disable)
- dssdev->ops->disable(dssdev);
-}
-EXPORT_SYMBOL_GPL(omapdss_device_disable);
/* -----------------------------------------------------------------------------
* Components Handling
@@ -344,7 +269,6 @@ void omapdss_gather_components(struct device *dev)
for_each_available_child_of_node(dev->of_node, child)
omapdss_walk_device(dev, child, true);
}
-EXPORT_SYMBOL(omapdss_gather_components);
static bool omapdss_component_is_loaded(struct omapdss_comp_node *comp)
{
@@ -369,8 +293,3 @@ bool omapdss_stack_is_ready(void)
return true;
}
-EXPORT_SYMBOL(omapdss_stack_is_ready);
-
-MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
-MODULE_DESCRIPTION("OMAP Display Subsystem Base");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 599183879caf..f4cbef8ccace 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -351,8 +351,6 @@ static unsigned long dispc_plane_pclk_rate(struct dispc_device *dispc,
static unsigned long dispc_plane_lclk_rate(struct dispc_device *dispc,
enum omap_plane_id plane);
-static void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask);
-
static inline void dispc_write_reg(struct dispc_device *dispc, u16 idx, u32 val)
{
__raw_writel(val, dispc->base + idx);
@@ -379,12 +377,12 @@ static void mgr_fld_write(struct dispc_device *dispc, enum omap_channel channel,
REG_FLD_MOD(dispc, rfld->reg, val, rfld->high, rfld->low);
}
-static int dispc_get_num_ovls(struct dispc_device *dispc)
+int dispc_get_num_ovls(struct dispc_device *dispc)
{
return dispc->feat->num_ovls;
}
-static int dispc_get_num_mgrs(struct dispc_device *dispc)
+int dispc_get_num_mgrs(struct dispc_device *dispc)
{
return dispc->feat->num_mgrs;
}
@@ -670,13 +668,13 @@ void dispc_runtime_put(struct dispc_device *dispc)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
+u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
enum omap_channel channel)
{
return mgr_desc[channel].vsync_irq;
}
-static u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
+u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
enum omap_channel channel)
{
if (channel == OMAP_DSS_CHANNEL_DIGIT && dispc->feat->no_framedone_tv)
@@ -685,18 +683,18 @@ static u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
return mgr_desc[channel].framedone_irq;
}
-static u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
+u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
enum omap_channel channel)
{
return mgr_desc[channel].sync_lost_irq;
}
-static u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc)
+u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc)
{
return DISPC_IRQ_FRAMEDONEWB;
}
-static void dispc_mgr_enable(struct dispc_device *dispc,
+void dispc_mgr_enable(struct dispc_device *dispc,
enum omap_channel channel, bool enable)
{
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_ENABLE, enable);
@@ -710,13 +708,13 @@ static bool dispc_mgr_is_enabled(struct dispc_device *dispc,
return !!mgr_fld_read(dispc, channel, DISPC_MGR_FLD_ENABLE);
}
-static bool dispc_mgr_go_busy(struct dispc_device *dispc,
+bool dispc_mgr_go_busy(struct dispc_device *dispc,
enum omap_channel channel)
{
return mgr_fld_read(dispc, channel, DISPC_MGR_FLD_GO) == 1;
}
-static void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
+void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
{
WARN_ON(!dispc_mgr_is_enabled(dispc, channel));
WARN_ON(dispc_mgr_go_busy(dispc, channel));
@@ -726,12 +724,12 @@ static void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_GO, 1);
}
-static bool dispc_wb_go_busy(struct dispc_device *dispc)
+bool dispc_wb_go_busy(struct dispc_device *dispc)
{
return REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1;
}
-static void dispc_wb_go(struct dispc_device *dispc)
+void dispc_wb_go(struct dispc_device *dispc)
{
enum omap_plane_id plane = OMAP_DSS_WB;
bool enable, go;
@@ -877,50 +875,62 @@ static void dispc_ovl_write_color_conv_coef(struct dispc_device *dispc,
#undef CVAL
}
-static void dispc_wb_write_color_conv_coef(struct dispc_device *dispc,
- const struct csc_coef_rgb2yuv *ct)
-{
- const enum omap_plane_id plane = OMAP_DSS_WB;
-
-#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0))
+/* YUV -> RGB, ITU-R BT.601, full range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_full = {
+ 256, 0, 358, /* ry, rcb, rcr |1.000 0.000 1.402|*/
+ 256, -88, -182, /* gy, gcb, gcr |1.000 -0.344 -0.714|*/
+ 256, 452, 0, /* by, bcb, bcr |1.000 1.772 0.000|*/
+ true, /* full range */
+};
- dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->yg, ct->yr));
- dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->crr, ct->yb));
- dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->crb, ct->crg));
- dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->cbg, ct->cbr));
- dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->cbb));
+/* YUV -> RGB, ITU-R BT.601, limited range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = {
+ 298, 0, 409, /* ry, rcb, rcr |1.164 0.000 1.596|*/
+ 298, -100, -208, /* gy, gcb, gcr |1.164 -0.392 -0.813|*/
+ 298, 516, 0, /* by, bcb, bcr |1.164 2.017 0.000|*/
+ false, /* limited range */
+};
- REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11);
+/* YUV -> RGB, ITU-R BT.709, full range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt709_full = {
+ 256, 0, 402, /* ry, rcb, rcr |1.000 0.000 1.570|*/
+ 256, -48, -120, /* gy, gcb, gcr |1.000 -0.187 -0.467|*/
+ 256, 475, 0, /* by, bcb, bcr |1.000 1.856 0.000|*/
+ true, /* full range */
+};
-#undef CVAL
-}
+/* YUV -> RGB, ITU-R BT.709, limited range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt709_lim = {
+ 298, 0, 459, /* ry, rcb, rcr |1.164 0.000 1.793|*/
+ 298, -55, -136, /* gy, gcb, gcr |1.164 -0.213 -0.533|*/
+ 298, 541, 0, /* by, bcb, bcr |1.164 2.112 0.000|*/
+ false, /* limited range */
+};
-static void dispc_setup_color_conv_coef(struct dispc_device *dispc)
+static void dispc_ovl_set_csc(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum drm_color_encoding color_encoding,
+ enum drm_color_range color_range)
{
- int i;
- int num_ovl = dispc_get_num_ovls(dispc);
-
- /* YUV -> RGB, ITU-R BT.601, limited range */
- const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = {
- 298, 0, 409, /* ry, rcb, rcr */
- 298, -100, -208, /* gy, gcb, gcr */
- 298, 516, 0, /* by, bcb, bcr */
- false, /* limited range */
- };
+ const struct csc_coef_yuv2rgb *csc;
- /* RGB -> YUV, ITU-R BT.601, limited range */
- const struct csc_coef_rgb2yuv coefs_rgb2yuv_bt601_lim = {
- 66, 129, 25, /* yr, yg, yb */
- -38, -74, 112, /* cbr, cbg, cbb */
- 112, -94, -18, /* crr, crg, crb */
- false, /* limited range */
- };
-
- for (i = 1; i < num_ovl; i++)
- dispc_ovl_write_color_conv_coef(dispc, i, &coefs_yuv2rgb_bt601_lim);
+ switch (color_encoding) {
+ default:
+ case DRM_COLOR_YCBCR_BT601:
+ if (color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ csc = &coefs_yuv2rgb_bt601_full;
+ else
+ csc = &coefs_yuv2rgb_bt601_lim;
+ break;
+ case DRM_COLOR_YCBCR_BT709:
+ if (color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ csc = &coefs_yuv2rgb_bt709_full;
+ else
+ csc = &coefs_yuv2rgb_bt709_lim;
+ break;
+ }
- if (dispc->feat->has_writeback)
- dispc_wb_write_color_conv_coef(dispc, &coefs_rgb2yuv_bt601_lim);
+ dispc_ovl_write_color_conv_coef(dispc, plane, csc);
}
static void dispc_ovl_set_ba0(struct dispc_device *dispc,
@@ -1285,7 +1295,7 @@ static bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
return false;
}
-static const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
+const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
enum omap_plane_id plane)
{
return dispc->feat->supported_color_modes[plane];
@@ -2601,7 +2611,9 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
u8 pre_mult_alpha, u8 global_alpha,
enum omap_dss_rotation_type rotation_type,
bool replication, const struct videomode *vm,
- bool mem_to_mem)
+ bool mem_to_mem,
+ enum drm_color_encoding color_encoding,
+ enum drm_color_range color_range)
{
bool five_taps = true;
bool fieldmode = false;
@@ -2750,6 +2762,9 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
fieldmode, fourcc, rotation);
dispc_ovl_set_output_size(dispc, plane, out_width, out_height);
dispc_ovl_set_vid_color_conv(dispc, plane, cconv);
+
+ if (plane != OMAP_DSS_WB)
+ dispc_ovl_set_csc(dispc, plane, color_encoding, color_range);
}
dispc_ovl_set_rotation_attrs(dispc, plane, rotation, rotation_type,
@@ -2764,7 +2779,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
return 0;
}
-static int dispc_ovl_setup(struct dispc_device *dispc,
+int dispc_ovl_setup(struct dispc_device *dispc,
enum omap_plane_id plane,
const struct omap_overlay_info *oi,
const struct videomode *vm, bool mem_to_mem,
@@ -2786,12 +2801,13 @@ static int dispc_ovl_setup(struct dispc_device *dispc,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->fourcc, oi->rotation,
oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
- oi->rotation_type, replication, vm, mem_to_mem);
+ oi->rotation_type, replication, vm, mem_to_mem,
+ oi->color_encoding, oi->color_range);
return r;
}
-static int dispc_wb_setup(struct dispc_device *dispc,
+int dispc_wb_setup(struct dispc_device *dispc,
const struct omap_dss_writeback_info *wi,
bool mem_to_mem, const struct videomode *vm,
enum dss_writeback_channel channel_in)
@@ -2819,7 +2835,8 @@ static int dispc_wb_setup(struct dispc_device *dispc,
wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
wi->height, wi->fourcc, wi->rotation, zorder,
wi->pre_mult_alpha, global_alpha, wi->rotation_type,
- replication, vm, mem_to_mem);
+ replication, vm, mem_to_mem, DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
if (r)
return r;
@@ -2874,12 +2891,12 @@ static int dispc_wb_setup(struct dispc_device *dispc,
return 0;
}
-static bool dispc_has_writeback(struct dispc_device *dispc)
+bool dispc_has_writeback(struct dispc_device *dispc)
{
return dispc->feat->has_writeback;
}
-static int dispc_ovl_enable(struct dispc_device *dispc,
+int dispc_ovl_enable(struct dispc_device *dispc,
enum omap_plane_id plane, bool enable)
{
DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
@@ -2970,7 +2987,7 @@ static void dispc_mgr_enable_alpha_fixed_zorder(struct dispc_device *dispc,
REG_FLD_MOD(dispc, DISPC_CONFIG, enable, 19, 19);
}
-static void dispc_mgr_setup(struct dispc_device *dispc,
+void dispc_mgr_setup(struct dispc_device *dispc,
enum omap_channel channel,
const struct omap_overlay_manager_info *info)
{
@@ -3049,7 +3066,7 @@ static void dispc_mgr_enable_stallmode(struct dispc_device *dispc,
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_STALLMODE, enable);
}
-static void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
+void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
enum omap_channel channel,
const struct dss_lcd_mgr_config *config)
{
@@ -3098,7 +3115,7 @@ static bool _dispc_mgr_pclk_ok(struct dispc_device *dispc,
return pclk <= dispc->feat->max_tv_pclk;
}
-static int dispc_mgr_check_timings(struct dispc_device *dispc,
+int dispc_mgr_check_timings(struct dispc_device *dispc,
enum omap_channel channel,
const struct videomode *vm)
{
@@ -3191,7 +3208,7 @@ static int vm_flag_to_int(enum display_flags flags, enum display_flags high,
}
/* change name to mode? */
-static void dispc_mgr_set_timings(struct dispc_device *dispc,
+void dispc_mgr_set_timings(struct dispc_device *dispc,
enum omap_channel channel,
const struct videomode *vm)
{
@@ -3735,17 +3752,17 @@ int dispc_mgr_get_clock_div(struct dispc_device *dispc,
return 0;
}
-static u32 dispc_read_irqstatus(struct dispc_device *dispc)
+u32 dispc_read_irqstatus(struct dispc_device *dispc)
{
return dispc_read_reg(dispc, DISPC_IRQSTATUS);
}
-static void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask)
+void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask)
{
dispc_write_reg(dispc, DISPC_IRQSTATUS, mask);
}
-static void dispc_write_irqenable(struct dispc_device *dispc, u32 mask)
+void dispc_write_irqenable(struct dispc_device *dispc, u32 mask)
{
u32 old_mask = dispc_read_reg(dispc, DISPC_IRQENABLE);
@@ -3769,7 +3786,7 @@ void dispc_disable_sidle(struct dispc_device *dispc)
REG_FLD_MOD(dispc, DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */
}
-static u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
+u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
enum omap_channel channel)
{
const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
@@ -3824,7 +3841,7 @@ static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = {
{ .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
};
-static void dispc_mgr_set_gamma(struct dispc_device *dispc,
+void dispc_mgr_set_gamma(struct dispc_device *dispc,
enum omap_channel channel,
const struct drm_color_lut *lut,
unsigned int length)
@@ -3930,8 +3947,6 @@ static void _omap_dispc_initial_config(struct dispc_device *dispc)
dispc->feat->has_gamma_table)
REG_FLD_MOD(dispc, DISPC_CONFIG, 1, 9, 9);
- dispc_setup_color_conv_coef(dispc);
-
dispc_set_loadmode(dispc, OMAP_DSS_LOAD_FRAME_ONLY);
dispc_init_fifos(dispc);
@@ -4482,7 +4497,7 @@ static irqreturn_t dispc_irq_handler(int irq, void *arg)
return dispc->user_handler(irq, dispc->user_data);
}
-static int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
+int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
void *dev_id)
{
int r;
@@ -4506,7 +4521,7 @@ static int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
return r;
}
-static void dispc_free_irq(struct dispc_device *dispc, void *dev_id)
+void dispc_free_irq(struct dispc_device *dispc, void *dev_id)
{
devm_free_irq(&dispc->pdev->dev, dispc->irq, dispc);
@@ -4514,7 +4529,7 @@ static void dispc_free_irq(struct dispc_device *dispc, void *dev_id)
dispc->user_data = NULL;
}
-static u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc)
+u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc)
{
u32 limit = 0;
@@ -4684,47 +4699,6 @@ static void dispc_errata_i734_wa(struct dispc_device *dispc)
REG_FLD_MOD(dispc, DISPC_CONFIG, gatestate, 8, 4);
}
-static const struct dispc_ops dispc_ops = {
- .read_irqstatus = dispc_read_irqstatus,
- .clear_irqstatus = dispc_clear_irqstatus,
- .write_irqenable = dispc_write_irqenable,
-
- .request_irq = dispc_request_irq,
- .free_irq = dispc_free_irq,
-
- .runtime_get = dispc_runtime_get,
- .runtime_put = dispc_runtime_put,
-
- .get_num_ovls = dispc_get_num_ovls,
- .get_num_mgrs = dispc_get_num_mgrs,
-
- .get_memory_bandwidth_limit = dispc_get_memory_bandwidth_limit,
-
- .mgr_enable = dispc_mgr_enable,
- .mgr_is_enabled = dispc_mgr_is_enabled,
- .mgr_get_vsync_irq = dispc_mgr_get_vsync_irq,
- .mgr_get_framedone_irq = dispc_mgr_get_framedone_irq,
- .mgr_get_sync_lost_irq = dispc_mgr_get_sync_lost_irq,
- .mgr_go_busy = dispc_mgr_go_busy,
- .mgr_go = dispc_mgr_go,
- .mgr_set_lcd_config = dispc_mgr_set_lcd_config,
- .mgr_check_timings = dispc_mgr_check_timings,
- .mgr_set_timings = dispc_mgr_set_timings,
- .mgr_setup = dispc_mgr_setup,
- .mgr_gamma_size = dispc_mgr_gamma_size,
- .mgr_set_gamma = dispc_mgr_set_gamma,
-
- .ovl_enable = dispc_ovl_enable,
- .ovl_setup = dispc_ovl_setup,
- .ovl_get_color_modes = dispc_ovl_get_color_modes,
-
- .wb_get_framedone_irq = dispc_wb_get_framedone_irq,
- .wb_setup = dispc_wb_setup,
- .has_writeback = dispc_has_writeback,
- .wb_go_busy = dispc_wb_go_busy,
- .wb_go = dispc_wb_go,
-};
-
/* DISPC HW IP initialisation */
static const struct of_device_id dispc_of_match[] = {
{ .compatible = "ti,omap2-dispc", .data = &omap24xx_dispc_feats },
@@ -4826,7 +4800,6 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
dispc_runtime_put(dispc);
dss->dispc = dispc;
- dss->dispc_ops = &dispc_ops;
dispc->debugfs = dss_debugfs_create_file(dss, "dispc", dispc_dump_regs,
dispc);
@@ -4848,7 +4821,6 @@ static void dispc_unbind(struct device *dev, struct device *master, void *data)
dss_debugfs_remove_file(dispc->debugfs);
dss->dispc = NULL;
- dss->dispc_ops = NULL;
pm_runtime_disable(dev);
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
deleted file mode 100644
index 3b82158b1bfd..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <[email protected]>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- */
-
-#define DSS_SUBSYS_NAME "DISPLAY"
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-
-#include <drm/drm_connector.h>
-#include <drm/drm_modes.h>
-
-#include "omapdss.h"
-
-static int disp_num_counter;
-
-void omapdss_display_init(struct omap_dss_device *dssdev)
-{
- int id;
-
- /*
- * Note: this presumes that all displays either have an DT alias, or
- * none has.
- */
- id = of_alias_get_id(dssdev->dev->of_node, "display");
- if (id < 0)
- id = disp_num_counter++;
-
- /* Use 'label' property for name, if it exists */
- of_property_read_string(dssdev->dev->of_node, "label", &dssdev->name);
-
- if (dssdev->name == NULL)
- dssdev->name = devm_kasprintf(dssdev->dev, GFP_KERNEL,
- "display%u", id);
-}
-EXPORT_SYMBOL_GPL(omapdss_display_init);
-
-int omapdss_display_get_modes(struct drm_connector *connector,
- const struct videomode *vm)
-{
- struct drm_display_mode *mode;
-
- mode = drm_mode_create(connector->dev);
- if (!mode)
- return 0;
-
- drm_display_mode_from_videomode(vm, mode);
-
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(omapdss_display_get_modes);
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 1d2992daef40..030f997eccd0 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -641,7 +641,6 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
out->type = OMAP_DISPLAY_TYPE_DPI;
out->dispc_channel = dpi_get_channel(dpi);
out->of_port = port_num;
- out->owner = THIS_MODULE;
r = omapdss_device_init_output(out, &dpi->bridge);
if (r < 0) {
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 735a4e9027d0..8e11612f5fe1 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -14,7 +14,9 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/semaphore.h>
@@ -33,6 +35,9 @@
#include <linux/component.h>
#include <linux/sys_soc.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
#include <video/mipi_display.h>
#include "omapdss.h"
@@ -40,73 +45,7 @@
#define DSI_CATCH_MISSING_TE
-struct dsi_reg { u16 module; u16 idx; };
-
-#define DSI_REG(mod, idx) ((const struct dsi_reg) { mod, idx })
-
-/* DSI Protocol Engine */
-
-#define DSI_PROTO 0
-#define DSI_PROTO_SZ 0x200
-
-#define DSI_REVISION DSI_REG(DSI_PROTO, 0x0000)
-#define DSI_SYSCONFIG DSI_REG(DSI_PROTO, 0x0010)
-#define DSI_SYSSTATUS DSI_REG(DSI_PROTO, 0x0014)
-#define DSI_IRQSTATUS DSI_REG(DSI_PROTO, 0x0018)
-#define DSI_IRQENABLE DSI_REG(DSI_PROTO, 0x001C)
-#define DSI_CTRL DSI_REG(DSI_PROTO, 0x0040)
-#define DSI_GNQ DSI_REG(DSI_PROTO, 0x0044)
-#define DSI_COMPLEXIO_CFG1 DSI_REG(DSI_PROTO, 0x0048)
-#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(DSI_PROTO, 0x004C)
-#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(DSI_PROTO, 0x0050)
-#define DSI_CLK_CTRL DSI_REG(DSI_PROTO, 0x0054)
-#define DSI_TIMING1 DSI_REG(DSI_PROTO, 0x0058)
-#define DSI_TIMING2 DSI_REG(DSI_PROTO, 0x005C)
-#define DSI_VM_TIMING1 DSI_REG(DSI_PROTO, 0x0060)
-#define DSI_VM_TIMING2 DSI_REG(DSI_PROTO, 0x0064)
-#define DSI_VM_TIMING3 DSI_REG(DSI_PROTO, 0x0068)
-#define DSI_CLK_TIMING DSI_REG(DSI_PROTO, 0x006C)
-#define DSI_TX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0070)
-#define DSI_RX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0074)
-#define DSI_COMPLEXIO_CFG2 DSI_REG(DSI_PROTO, 0x0078)
-#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(DSI_PROTO, 0x007C)
-#define DSI_VM_TIMING4 DSI_REG(DSI_PROTO, 0x0080)
-#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(DSI_PROTO, 0x0084)
-#define DSI_VM_TIMING5 DSI_REG(DSI_PROTO, 0x0088)
-#define DSI_VM_TIMING6 DSI_REG(DSI_PROTO, 0x008C)
-#define DSI_VM_TIMING7 DSI_REG(DSI_PROTO, 0x0090)
-#define DSI_STOPCLK_TIMING DSI_REG(DSI_PROTO, 0x0094)
-#define DSI_VC_CTRL(n) DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20))
-#define DSI_VC_TE(n) DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20))
-#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20))
-#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(DSI_PROTO, 0x010C + (n * 0x20))
-#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20))
-#define DSI_VC_IRQSTATUS(n) DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20))
-#define DSI_VC_IRQENABLE(n) DSI_REG(DSI_PROTO, 0x011C + (n * 0x20))
-
-/* DSIPHY_SCP */
-
-#define DSI_PHY 1
-#define DSI_PHY_OFFSET 0x200
-#define DSI_PHY_SZ 0x40
-
-#define DSI_DSIPHY_CFG0 DSI_REG(DSI_PHY, 0x0000)
-#define DSI_DSIPHY_CFG1 DSI_REG(DSI_PHY, 0x0004)
-#define DSI_DSIPHY_CFG2 DSI_REG(DSI_PHY, 0x0008)
-#define DSI_DSIPHY_CFG5 DSI_REG(DSI_PHY, 0x0014)
-#define DSI_DSIPHY_CFG10 DSI_REG(DSI_PHY, 0x0028)
-
-/* DSI_PLL_CTRL_SCP */
-
-#define DSI_PLL 2
-#define DSI_PLL_OFFSET 0x300
-#define DSI_PLL_SZ 0x20
-
-#define DSI_PLL_CONTROL DSI_REG(DSI_PLL, 0x0000)
-#define DSI_PLL_STATUS DSI_REG(DSI_PLL, 0x0004)
-#define DSI_PLL_GO DSI_REG(DSI_PLL, 0x0008)
-#define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 0x000C)
-#define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010)
+#include "dsi.h"
#define REG_GET(dsi, idx, start, end) \
FLD_GET(dsi_read_reg(dsi, idx), start, end)
@@ -114,324 +53,36 @@ struct dsi_reg { u16 module; u16 idx; };
#define REG_FLD_MOD(dsi, idx, val, start, end) \
dsi_write_reg(dsi, idx, FLD_MOD(dsi_read_reg(dsi, idx), val, start, end))
-/* Global interrupts */
-#define DSI_IRQ_VC0 (1 << 0)
-#define DSI_IRQ_VC1 (1 << 1)
-#define DSI_IRQ_VC2 (1 << 2)
-#define DSI_IRQ_VC3 (1 << 3)
-#define DSI_IRQ_WAKEUP (1 << 4)
-#define DSI_IRQ_RESYNC (1 << 5)
-#define DSI_IRQ_PLL_LOCK (1 << 7)
-#define DSI_IRQ_PLL_UNLOCK (1 << 8)
-#define DSI_IRQ_PLL_RECALL (1 << 9)
-#define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
-#define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
-#define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
-#define DSI_IRQ_TE_TRIGGER (1 << 16)
-#define DSI_IRQ_ACK_TRIGGER (1 << 17)
-#define DSI_IRQ_SYNC_LOST (1 << 18)
-#define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
-#define DSI_IRQ_TA_TIMEOUT (1 << 20)
-#define DSI_IRQ_ERROR_MASK \
- (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
- DSI_IRQ_TA_TIMEOUT)
-#define DSI_IRQ_CHANNEL_MASK 0xf
-
-/* Virtual channel interrupts */
-#define DSI_VC_IRQ_CS (1 << 0)
-#define DSI_VC_IRQ_ECC_CORR (1 << 1)
-#define DSI_VC_IRQ_PACKET_SENT (1 << 2)
-#define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
-#define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
-#define DSI_VC_IRQ_BTA (1 << 5)
-#define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
-#define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
-#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
-#define DSI_VC_IRQ_ERROR_MASK \
- (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
- DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
- DSI_VC_IRQ_FIFO_TX_UDF)
-
-/* ComplexIO interrupts */
-#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
-#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
-#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
-#define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3)
-#define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4)
-#define DSI_CIO_IRQ_ERRESC1 (1 << 5)
-#define DSI_CIO_IRQ_ERRESC2 (1 << 6)
-#define DSI_CIO_IRQ_ERRESC3 (1 << 7)
-#define DSI_CIO_IRQ_ERRESC4 (1 << 8)
-#define DSI_CIO_IRQ_ERRESC5 (1 << 9)
-#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
-#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
-#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
-#define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13)
-#define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14)
-#define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
-#define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
-#define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
-#define DSI_CIO_IRQ_STATEULPS4 (1 << 18)
-#define DSI_CIO_IRQ_STATEULPS5 (1 << 19)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29)
-#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
-#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
-#define DSI_CIO_IRQ_ERROR_MASK \
- (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
- DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
- DSI_CIO_IRQ_ERRSYNCESC5 | \
- DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
- DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
- DSI_CIO_IRQ_ERRESC5 | \
- DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
- DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
- DSI_CIO_IRQ_ERRCONTROL5 | \
- DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
- DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
- DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
- DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
- DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
-
-typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
-struct dsi_data;
-
-static int dsi_display_init_dispc(struct dsi_data *dsi);
-static void dsi_display_uninit_dispc(struct dsi_data *dsi);
-
-static int dsi_vc_send_null(struct dsi_data *dsi, int channel);
-
-/* DSI PLL HSDIV indices */
-#define HSDIV_DISPC 0
-#define HSDIV_DSI 1
-
-#define DSI_MAX_NR_ISRS 2
-#define DSI_MAX_NR_LANES 5
-
-enum dsi_model {
- DSI_MODEL_OMAP3,
- DSI_MODEL_OMAP4,
- DSI_MODEL_OMAP5,
-};
-
-enum dsi_lane_function {
- DSI_LANE_UNUSED = 0,
- DSI_LANE_CLK,
- DSI_LANE_DATA1,
- DSI_LANE_DATA2,
- DSI_LANE_DATA3,
- DSI_LANE_DATA4,
-};
-
-struct dsi_lane_config {
- enum dsi_lane_function function;
- u8 polarity;
-};
-
-struct dsi_isr_data {
- omap_dsi_isr_t isr;
- void *arg;
- u32 mask;
-};
-
-enum fifo_size {
- DSI_FIFO_SIZE_0 = 0,
- DSI_FIFO_SIZE_32 = 1,
- DSI_FIFO_SIZE_64 = 2,
- DSI_FIFO_SIZE_96 = 3,
- DSI_FIFO_SIZE_128 = 4,
-};
-
-enum dsi_vc_source {
- DSI_VC_SOURCE_L4 = 0,
- DSI_VC_SOURCE_VP,
-};
-
-struct dsi_irq_stats {
- unsigned long last_reset;
- unsigned int irq_count;
- unsigned int dsi_irqs[32];
- unsigned int vc_irqs[4][32];
- unsigned int cio_irqs[32];
-};
-
-struct dsi_isr_tables {
- struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
- struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
- struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
-};
-
-struct dsi_clk_calc_ctx {
- struct dsi_data *dsi;
- struct dss_pll *pll;
-
- /* inputs */
-
- const struct omap_dss_dsi_config *config;
-
- unsigned long req_pck_min, req_pck_nom, req_pck_max;
-
- /* outputs */
-
- struct dss_pll_clock_info dsi_cinfo;
- struct dispc_clock_info dispc_cinfo;
-
- struct videomode vm;
- struct omap_dss_dsi_videomode_timings dsi_vm;
-};
-
-struct dsi_lp_clock_info {
- unsigned long lp_clk;
- u16 lp_clk_div;
-};
-
-struct dsi_module_id_data {
- u32 address;
- int id;
-};
-
-enum dsi_quirks {
- DSI_QUIRK_PLL_PWR_BUG = (1 << 0), /* DSI-PLL power command 0x3 is not working */
- DSI_QUIRK_DCS_CMD_CONFIG_VC = (1 << 1),
- DSI_QUIRK_VC_OCP_WIDTH = (1 << 2),
- DSI_QUIRK_REVERSE_TXCLKESC = (1 << 3),
- DSI_QUIRK_GNQ = (1 << 4),
- DSI_QUIRK_PHY_DCC = (1 << 5),
-};
-
-struct dsi_of_data {
- enum dsi_model model;
- const struct dss_pll_hw *pll_hw;
- const struct dsi_module_id_data *modules;
- unsigned int max_fck_freq;
- unsigned int max_pll_lpdiv;
- enum dsi_quirks quirks;
-};
-
-struct dsi_data {
- struct device *dev;
- void __iomem *proto_base;
- void __iomem *phy_base;
- void __iomem *pll_base;
-
- const struct dsi_of_data *data;
- int module_id;
-
- int irq;
-
- bool is_enabled;
-
- struct clk *dss_clk;
- struct regmap *syscon;
- struct dss_device *dss;
-
- struct dispc_clock_info user_dispc_cinfo;
- struct dss_pll_clock_info user_dsi_cinfo;
+static int dsi_init_dispc(struct dsi_data *dsi);
+static void dsi_uninit_dispc(struct dsi_data *dsi);
- struct dsi_lp_clock_info user_lp_cinfo;
- struct dsi_lp_clock_info current_lp_cinfo;
+static int dsi_vc_send_null(struct dsi_data *dsi, int vc, int channel);
- struct dss_pll pll;
-
- bool vdds_dsi_enabled;
- struct regulator *vdds_dsi_reg;
-
- struct {
- enum dsi_vc_source source;
- struct omap_dss_device *dssdev;
- enum fifo_size tx_fifo_size;
- enum fifo_size rx_fifo_size;
- int vc_id;
- } vc[4];
-
- struct mutex lock;
- struct semaphore bus_lock;
-
- spinlock_t irq_lock;
- struct dsi_isr_tables isr_tables;
- /* space for a copy used by the interrupt handler */
- struct dsi_isr_tables isr_tables_copy;
-
- int update_channel;
-#ifdef DSI_PERF_MEASURE
- unsigned int update_bytes;
-#endif
-
- bool te_enabled;
- bool ulps_enabled;
-
- void (*framedone_callback)(int, void *);
- void *framedone_data;
-
- struct delayed_work framedone_timeout_work;
-
-#ifdef DSI_CATCH_MISSING_TE
- struct timer_list te_timer;
-#endif
-
- unsigned long cache_req_pck;
- unsigned long cache_clk_freq;
- struct dss_pll_clock_info cache_cinfo;
-
- u32 errors;
- spinlock_t errors_lock;
-#ifdef DSI_PERF_MEASURE
- ktime_t perf_setup_time;
- ktime_t perf_start_time;
-#endif
- int debug_read;
- int debug_write;
- struct {
- struct dss_debugfs_entry *irqs;
- struct dss_debugfs_entry *regs;
- struct dss_debugfs_entry *clks;
- } debugfs;
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spinlock_t irq_stats_lock;
- struct dsi_irq_stats irq_stats;
-#endif
-
- unsigned int num_lanes_supported;
- unsigned int line_buffer_size;
-
- struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
- unsigned int num_lanes_used;
-
- unsigned int scp_clk_refcount;
-
- struct dss_lcd_mgr_config mgr_config;
- struct videomode vm;
- enum omap_dss_dsi_pixel_format pix_fmt;
- enum omap_dss_dsi_mode mode;
- struct omap_dss_dsi_videomode_timings vm_timings;
-
- struct omap_dss_device output;
-};
-
-struct dsi_packet_sent_handler_data {
- struct dsi_data *dsi;
- struct completion *completion;
-};
+static ssize_t _omap_dsi_host_transfer(struct dsi_data *dsi, int vc,
+ const struct mipi_dsi_msg *msg);
#ifdef DSI_PERF_MEASURE
static bool dsi_perf;
module_param(dsi_perf, bool, 0644);
#endif
+/* Note: for some reason video mode seems to work only if VC_VIDEO is 0 */
+#define VC_VIDEO 0
+#define VC_CMD 1
+
+#define drm_bridge_to_dsi(bridge) \
+ container_of(bridge, struct dsi_data, bridge)
+
static inline struct dsi_data *to_dsi_data(struct omap_dss_device *dssdev)
{
return dev_get_drvdata(dssdev->dev);
}
+static inline struct dsi_data *host_to_omap(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct dsi_data, host);
+}
+
static inline void dsi_write_reg(struct dsi_data *dsi,
const struct dsi_reg idx, u32 val)
{
@@ -461,17 +112,13 @@ static inline u32 dsi_read_reg(struct dsi_data *dsi, const struct dsi_reg idx)
return __raw_readl(base + idx.idx);
}
-static void dsi_bus_lock(struct omap_dss_device *dssdev)
+static void dsi_bus_lock(struct dsi_data *dsi)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
down(&dsi->bus_lock);
}
-static void dsi_bus_unlock(struct omap_dss_device *dssdev)
+static void dsi_bus_unlock(struct dsi_data *dsi)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
up(&dsi->bus_lock);
}
@@ -514,22 +161,6 @@ static inline bool wait_for_bit_change(struct dsi_data *dsi,
return false;
}
-static u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
-{
- switch (fmt) {
- case OMAP_DSS_DSI_FMT_RGB888:
- case OMAP_DSS_DSI_FMT_RGB666:
- return 24;
- case OMAP_DSS_DSI_FMT_RGB666_PACKED:
- return 18;
- case OMAP_DSS_DSI_FMT_RGB565:
- return 16;
- default:
- BUG();
- return 0;
- }
-}
-
#ifdef DSI_PERF_MEASURE
static void dsi_perf_mark_setup(struct dsi_data *dsi)
{
@@ -623,7 +254,7 @@ static void print_irq_status(u32 status)
#undef PIS
}
-static void print_irq_status_vc(int channel, u32 status)
+static void print_irq_status_vc(int vc, u32 status)
{
if (status == 0)
return;
@@ -634,7 +265,7 @@ static void print_irq_status_vc(int channel, u32 status)
#define PIS(x) (status & DSI_VC_IRQ_##x) ? (#x " ") : ""
pr_debug("DSI VC(%d) IRQ 0x%x: %s%s%s%s%s%s%s%s%s\n",
- channel,
+ vc,
status,
PIS(CS),
PIS(ECC_CORR),
@@ -1015,7 +646,7 @@ static int dsi_unregister_isr(struct dsi_data *dsi, omap_dsi_isr_t isr,
return r;
}
-static int dsi_register_isr_vc(struct dsi_data *dsi, int channel,
+static int dsi_register_isr_vc(struct dsi_data *dsi, int vc,
omap_dsi_isr_t isr, void *arg, u32 mask)
{
unsigned long flags;
@@ -1024,18 +655,18 @@ static int dsi_register_isr_vc(struct dsi_data *dsi, int channel,
spin_lock_irqsave(&dsi->irq_lock, flags);
r = _dsi_register_isr(isr, arg, mask,
- dsi->isr_tables.isr_table_vc[channel],
- ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
+ dsi->isr_tables.isr_table_vc[vc],
+ ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]));
if (r == 0)
- _omap_dsi_set_irqs_vc(dsi, channel);
+ _omap_dsi_set_irqs_vc(dsi, vc);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
-static int dsi_unregister_isr_vc(struct dsi_data *dsi, int channel,
+static int dsi_unregister_isr_vc(struct dsi_data *dsi, int vc,
omap_dsi_isr_t isr, void *arg, u32 mask)
{
unsigned long flags;
@@ -1044,49 +675,11 @@ static int dsi_unregister_isr_vc(struct dsi_data *dsi, int channel,
spin_lock_irqsave(&dsi->irq_lock, flags);
r = _dsi_unregister_isr(isr, arg, mask,
- dsi->isr_tables.isr_table_vc[channel],
- ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
+ dsi->isr_tables.isr_table_vc[vc],
+ ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]));
if (r == 0)
- _omap_dsi_set_irqs_vc(dsi, channel);
-
- spin_unlock_irqrestore(&dsi->irq_lock, flags);
-
- return r;
-}
-
-static int dsi_register_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr,
- void *arg, u32 mask)
-{
- unsigned long flags;
- int r;
-
- spin_lock_irqsave(&dsi->irq_lock, flags);
-
- r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
- ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
-
- if (r == 0)
- _omap_dsi_set_irqs_cio(dsi);
-
- spin_unlock_irqrestore(&dsi->irq_lock, flags);
-
- return r;
-}
-
-static int dsi_unregister_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr,
- void *arg, u32 mask)
-{
- unsigned long flags;
- int r;
-
- spin_lock_irqsave(&dsi->irq_lock, flags);
-
- r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
- ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
-
- if (r == 0)
- _omap_dsi_set_irqs_cio(dsi);
+ _omap_dsi_set_irqs_vc(dsi, vc);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
@@ -1819,56 +1412,6 @@ static void dsi_cio_timings(struct dsi_data *dsi)
dsi_write_reg(dsi, DSI_DSIPHY_CFG2, r);
}
-/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */
-static void dsi_cio_enable_lane_override(struct dsi_data *dsi,
- unsigned int mask_p,
- unsigned int mask_n)
-{
- int i;
- u32 l;
- u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26;
-
- l = 0;
-
- for (i = 0; i < dsi->num_lanes_supported; ++i) {
- unsigned int p = dsi->lanes[i].polarity;
-
- if (mask_p & (1 << i))
- l |= 1 << (i * 2 + (p ? 0 : 1));
-
- if (mask_n & (1 << i))
- l |= 1 << (i * 2 + (p ? 1 : 0));
- }
-
- /*
- * Bits in REGLPTXSCPDAT4TO0DXDY:
- * 17: DY0 18: DX0
- * 19: DY1 20: DX1
- * 21: DY2 22: DX2
- * 23: DY3 24: DX3
- * 25: DY4 26: DX4
- */
-
- /* Set the lane override configuration */
-
- /* REGLPTXSCPDAT4TO0DXDY */
- REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
-
- /* Enable lane override */
-
- /* ENLPTXSCPDAT */
- REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 1, 27, 27);
-}
-
-static void dsi_cio_disable_lane_override(struct dsi_data *dsi)
-{
- /* Disable lane override */
- REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
- /* Reset the lane override configuration */
- /* REGLPTXSCPDAT4TO0DXDY */
- REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 22, 17);
-}
-
static int dsi_cio_wait_tx_clk_esc_reset(struct dsi_data *dsi)
{
int t, i;
@@ -2043,32 +1586,6 @@ static int dsi_cio_init(struct dsi_data *dsi)
l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
dsi_write_reg(dsi, DSI_TIMING1, l);
- if (dsi->ulps_enabled) {
- unsigned int mask_p;
- int i;
-
- DSSDBG("manual ulps exit\n");
-
- /* ULPS is exited by Mark-1 state for 1ms, followed by
- * stop state. DSS HW cannot do this via the normal
- * ULPS exit sequence, as after reset the DSS HW thinks
- * that we are not in ULPS mode, and refuses to send the
- * sequence. So we need to send the ULPS exit sequence
- * manually by setting positive lines high and negative lines
- * low for 1ms.
- */
-
- mask_p = 0;
-
- for (i = 0; i < dsi->num_lanes_supported; ++i) {
- if (dsi->lanes[i].function == DSI_LANE_UNUSED)
- continue;
- mask_p |= 1 << i;
- }
-
- dsi_cio_enable_lane_override(dsi, mask_p, 0);
- }
-
r = dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ON);
if (r)
goto err_cio_pwr;
@@ -2087,29 +1604,15 @@ static int dsi_cio_init(struct dsi_data *dsi)
if (r)
goto err_tx_clk_esc_rst;
- if (dsi->ulps_enabled) {
- /* Keep Mark-1 state for 1ms (as per DSI spec) */
- ktime_t wait = ns_to_ktime(1000 * 1000);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
-
- /* Disable the override. The lanes should be set to Mark-11
- * state by the HW */
- dsi_cio_disable_lane_override(dsi);
- }
-
/* FORCE_TX_STOP_MODE_IO */
REG_FLD_MOD(dsi, DSI_TIMING1, 0, 15, 15);
dsi_cio_timings(dsi);
- if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- /* DDR_CLK_ALWAYS_ON */
- REG_FLD_MOD(dsi, DSI_CLK_CTRL,
- dsi->vm_timings.ddr_clk_always_on, 13, 13);
- }
-
- dsi->ulps_enabled = false;
+ /* DDR_CLK_ALWAYS_ON */
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL,
+ !(dsi->dsidev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS),
+ 13, 13);
DSSDBG("CIO init done\n");
@@ -2120,8 +1623,6 @@ err_tx_clk_esc_rst:
err_cio_pwr_dom:
dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_OFF);
err_cio_pwr:
- if (dsi->ulps_enabled)
- dsi_cio_disable_lane_override(dsi);
err_scp_clk_dom:
dsi_disable_scp_clk(dsi);
dsi_disable_pads(dsi);
@@ -2218,9 +1719,9 @@ static int dsi_force_tx_stop_mode_io(struct dsi_data *dsi)
return 0;
}
-static bool dsi_vc_is_enabled(struct dsi_data *dsi, int channel)
+static bool dsi_vc_is_enabled(struct dsi_data *dsi, int vc)
{
- return REG_GET(dsi, DSI_VC_CTRL(channel), 0, 0);
+ return REG_GET(dsi, DSI_VC_CTRL(vc), 0, 0);
}
static void dsi_packet_sent_handler_vp(void *data, u32 mask)
@@ -2228,14 +1729,14 @@ static void dsi_packet_sent_handler_vp(void *data, u32 mask)
struct dsi_packet_sent_handler_data *vp_data =
(struct dsi_packet_sent_handler_data *) data;
struct dsi_data *dsi = vp_data->dsi;
- const int channel = dsi->update_channel;
+ const int vc = dsi->update_vc;
u8 bit = dsi->te_enabled ? 30 : 31;
- if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit) == 0)
+ if (REG_GET(dsi, DSI_VC_TE(vc), bit, bit) == 0)
complete(vp_data->completion);
}
-static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel)
+static int dsi_sync_vc_vp(struct dsi_data *dsi, int vc)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct dsi_packet_sent_handler_data vp_data = {
@@ -2247,13 +1748,13 @@ static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel)
bit = dsi->te_enabled ? 30 : 31;
- r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
+ r = dsi_register_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
if (r)
goto err0;
/* Wait for completion only if TE_EN/TE_START is still set */
- if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit)) {
+ if (REG_GET(dsi, DSI_VC_TE(vc), bit, bit)) {
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(10)) == 0) {
DSSERR("Failed to complete previous frame transfer\n");
@@ -2262,12 +1763,12 @@ static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel)
}
}
- dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
+ dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
return 0;
err1:
- dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
+ dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
err0:
return r;
@@ -2278,13 +1779,13 @@ static void dsi_packet_sent_handler_l4(void *data, u32 mask)
struct dsi_packet_sent_handler_data *l4_data =
(struct dsi_packet_sent_handler_data *) data;
struct dsi_data *dsi = l4_data->dsi;
- const int channel = dsi->update_channel;
+ const int vc = dsi->update_vc;
- if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5) == 0)
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 5, 5) == 0)
complete(l4_data->completion);
}
-static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel)
+static int dsi_sync_vc_l4(struct dsi_data *dsi, int vc)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct dsi_packet_sent_handler_data l4_data = {
@@ -2293,13 +1794,13 @@ static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel)
};
int r = 0;
- r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
+ r = dsi_register_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
if (r)
goto err0;
/* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
- if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5)) {
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 5, 5)) {
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(10)) == 0) {
DSSERR("Failed to complete previous l4 transfer\n");
@@ -2308,47 +1809,47 @@ static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel)
}
}
- dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
+ dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
return 0;
err1:
- dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
+ dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
err0:
return r;
}
-static int dsi_sync_vc(struct dsi_data *dsi, int channel)
+static int dsi_sync_vc(struct dsi_data *dsi, int vc)
{
WARN_ON(!dsi_bus_is_locked(dsi));
WARN_ON(in_interrupt());
- if (!dsi_vc_is_enabled(dsi, channel))
+ if (!dsi_vc_is_enabled(dsi, vc))
return 0;
- switch (dsi->vc[channel].source) {
+ switch (dsi->vc[vc].source) {
case DSI_VC_SOURCE_VP:
- return dsi_sync_vc_vp(dsi, channel);
+ return dsi_sync_vc_vp(dsi, vc);
case DSI_VC_SOURCE_L4:
- return dsi_sync_vc_l4(dsi, channel);
+ return dsi_sync_vc_l4(dsi, vc);
default:
BUG();
return -EINVAL;
}
}
-static int dsi_vc_enable(struct dsi_data *dsi, int channel, bool enable)
+static int dsi_vc_enable(struct dsi_data *dsi, int vc, bool enable)
{
- DSSDBG("dsi_vc_enable channel %d, enable %d\n",
- channel, enable);
+ DSSDBG("dsi_vc_enable vc %d, enable %d\n",
+ vc, enable);
enable = enable ? 1 : 0;
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 0, 0);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), enable, 0, 0);
- if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 0, enable)) {
+ if (!wait_for_bit_change(dsi, DSI_VC_CTRL(vc), 0, enable)) {
DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
return -EIO;
}
@@ -2356,17 +1857,17 @@ static int dsi_vc_enable(struct dsi_data *dsi, int channel, bool enable)
return 0;
}
-static void dsi_vc_initial_config(struct dsi_data *dsi, int channel)
+static void dsi_vc_initial_config(struct dsi_data *dsi, int vc)
{
u32 r;
- DSSDBG("Initial config of virtual channel %d", channel);
+ DSSDBG("Initial config of VC %d", vc);
- r = dsi_read_reg(dsi, DSI_VC_CTRL(channel));
+ r = dsi_read_reg(dsi, DSI_VC_CTRL(vc));
if (FLD_GET(r, 15, 15)) /* VC_BUSY */
DSSERR("VC(%d) busy when trying to configure it!\n",
- channel);
+ vc);
r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
@@ -2381,74 +1882,39 @@ static void dsi_vc_initial_config(struct dsi_data *dsi, int channel)
r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
- dsi_write_reg(dsi, DSI_VC_CTRL(channel), r);
+ dsi_write_reg(dsi, DSI_VC_CTRL(vc), r);
- dsi->vc[channel].source = DSI_VC_SOURCE_L4;
+ dsi->vc[vc].source = DSI_VC_SOURCE_L4;
}
-static int dsi_vc_config_source(struct dsi_data *dsi, int channel,
- enum dsi_vc_source source)
-{
- if (dsi->vc[channel].source == source)
- return 0;
-
- DSSDBG("Source config of virtual channel %d", channel);
-
- dsi_sync_vc(dsi, channel);
-
- dsi_vc_enable(dsi, channel, 0);
-
- /* VC_BUSY */
- if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 15, 0)) {
- DSSERR("vc(%d) busy when trying to config for VP\n", channel);
- return -EIO;
- }
-
- /* SOURCE, 0 = L4, 1 = video port */
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), source, 1, 1);
-
- /* DCS_CMD_ENABLE */
- if (dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) {
- bool enable = source == DSI_VC_SOURCE_VP;
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 30, 30);
- }
-
- dsi_vc_enable(dsi, channel, 1);
-
- dsi->vc[channel].source = source;
-
- return 0;
-}
-
-static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
+static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int vc,
bool enable)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
- DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
+ DSSDBG("dsi_vc_enable_hs(%d, %d)\n", vc, enable);
+
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 9, 9) == enable)
+ return;
WARN_ON(!dsi_bus_is_locked(dsi));
- dsi_vc_enable(dsi, channel, 0);
+ dsi_vc_enable(dsi, vc, 0);
dsi_if_enable(dsi, 0);
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 9, 9);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), enable, 9, 9);
- dsi_vc_enable(dsi, channel, 1);
+ dsi_vc_enable(dsi, vc, 1);
dsi_if_enable(dsi, 1);
dsi_force_tx_stop_mode_io(dsi);
-
- /* start the DDR clock by sending a NULL packet */
- if (dsi->vm_timings.ddr_clk_always_on && enable)
- dsi_vc_send_null(dsi, channel);
}
-static void dsi_vc_flush_long_data(struct dsi_data *dsi, int channel)
+static void dsi_vc_flush_long_data(struct dsi_data *dsi, int vc)
{
- while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
+ while (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
u32 val;
- val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
(val >> 0) & 0xff,
(val >> 8) & 0xff,
@@ -2494,13 +1960,13 @@ static void dsi_show_rx_ack_with_err(u16 err)
DSSERR("\t\tDSI Protocol Violation\n");
}
-static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel)
+static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int vc)
{
/* RX_FIFO_NOT_EMPTY */
- while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
+ while (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
u32 val;
u8 dt;
- val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
DSSERR("\trawval %#08x\n", val);
dt = FLD_GET(val, 5, 0);
if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
@@ -2515,7 +1981,7 @@ static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel)
} else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
DSSERR("\tDCS long response, len %d\n",
FLD_GET(val, 23, 8));
- dsi_vc_flush_long_data(dsi, channel);
+ dsi_vc_flush_long_data(dsi, vc);
} else {
DSSERR("\tunknown datatype 0x%02x\n", dt);
}
@@ -2523,35 +1989,35 @@ static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel)
return 0;
}
-static int dsi_vc_send_bta(struct dsi_data *dsi, int channel)
+static int dsi_vc_send_bta(struct dsi_data *dsi, int vc)
{
if (dsi->debug_write || dsi->debug_read)
- DSSDBG("dsi_vc_send_bta %d\n", channel);
+ DSSDBG("dsi_vc_send_bta %d\n", vc);
WARN_ON(!dsi_bus_is_locked(dsi));
/* RX_FIFO_NOT_EMPTY */
- if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
- dsi_vc_flush_receive_data(dsi, channel);
+ dsi_vc_flush_receive_data(dsi, vc);
}
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 1, 6, 6); /* BTA_EN */
/* flush posted write */
- dsi_read_reg(dsi, DSI_VC_CTRL(channel));
+ dsi_read_reg(dsi, DSI_VC_CTRL(vc));
return 0;
}
-static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
+static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int vc)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
DECLARE_COMPLETION_ONSTACK(completion);
int r = 0;
u32 err;
- r = dsi_register_isr_vc(dsi, channel, dsi_completion_handler,
+ r = dsi_register_isr_vc(dsi, vc, dsi_completion_handler,
&completion, DSI_VC_IRQ_BTA);
if (r)
goto err0;
@@ -2561,7 +2027,7 @@ static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
if (r)
goto err1;
- r = dsi_vc_send_bta(dsi, channel);
+ r = dsi_vc_send_bta(dsi, vc);
if (r)
goto err2;
@@ -2582,29 +2048,30 @@ err2:
dsi_unregister_isr(dsi, dsi_completion_handler, &completion,
DSI_IRQ_ERROR_MASK);
err1:
- dsi_unregister_isr_vc(dsi, channel, dsi_completion_handler,
+ dsi_unregister_isr_vc(dsi, vc, dsi_completion_handler,
&completion, DSI_VC_IRQ_BTA);
err0:
return r;
}
-static inline void dsi_vc_write_long_header(struct dsi_data *dsi, int channel,
- u8 data_type, u16 len, u8 ecc)
+static inline void dsi_vc_write_long_header(struct dsi_data *dsi, int vc,
+ int channel, u8 data_type, u16 len,
+ u8 ecc)
{
u32 val;
u8 data_id;
WARN_ON(!dsi_bus_is_locked(dsi));
- data_id = data_type | dsi->vc[channel].vc_id << 6;
+ data_id = data_type | channel << 6;
val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
FLD_VAL(ecc, 31, 24);
- dsi_write_reg(dsi, DSI_VC_LONG_PACKET_HEADER(channel), val);
+ dsi_write_reg(dsi, DSI_VC_LONG_PACKET_HEADER(vc), val);
}
-static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int channel,
+static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int vc,
u8 b1, u8 b2, u8 b3, u8 b4)
{
u32 val;
@@ -2614,33 +2081,31 @@ static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int channel,
/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
b1, b2, b3, b4, val); */
- dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
+ dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(vc), val);
}
-static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type,
- u8 *data, u16 len, u8 ecc)
+static int dsi_vc_send_long(struct dsi_data *dsi, int vc,
+ const struct mipi_dsi_msg *msg)
{
/*u32 val; */
int i;
- u8 *p;
+ const u8 *p;
int r = 0;
u8 b1, b2, b3, b4;
if (dsi->debug_write)
- DSSDBG("dsi_vc_send_long, %d bytes\n", len);
+ DSSDBG("dsi_vc_send_long, %d bytes\n", msg->tx_len);
/* len + header */
- if (dsi->vc[channel].tx_fifo_size * 32 * 4 < len + 4) {
+ if (dsi->vc[vc].tx_fifo_size * 32 * 4 < msg->tx_len + 4) {
DSSERR("unable to send long packet: packet too long.\n");
return -EINVAL;
}
- dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4);
-
- dsi_vc_write_long_header(dsi, channel, data_type, len, ecc);
+ dsi_vc_write_long_header(dsi, vc, msg->channel, msg->type, msg->tx_len, 0);
- p = data;
- for (i = 0; i < len >> 2; i++) {
+ p = msg->tx_buf;
+ for (i = 0; i < msg->tx_len >> 2; i++) {
if (dsi->debug_write)
DSSDBG("\tsending full packet %d\n", i);
@@ -2649,10 +2114,10 @@ static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type,
b3 = *p++;
b4 = *p++;
- dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, b4);
+ dsi_vc_write_long_payload(dsi, vc, b1, b2, b3, b4);
}
- i = len % 4;
+ i = msg->tx_len % 4;
if (i) {
b1 = 0; b2 = 0; b3 = 0;
@@ -2674,194 +2139,89 @@ static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type,
break;
}
- dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, 0);
+ dsi_vc_write_long_payload(dsi, vc, b1, b2, b3, 0);
}
return r;
}
-static int dsi_vc_send_short(struct dsi_data *dsi, int channel, u8 data_type,
- u16 data, u8 ecc)
+static int dsi_vc_send_short(struct dsi_data *dsi, int vc,
+ const struct mipi_dsi_msg *msg)
{
+ struct mipi_dsi_packet pkt;
u32 r;
- u8 data_id;
+
+ r = mipi_dsi_create_packet(&pkt, msg);
+ if (r < 0)
+ return r;
WARN_ON(!dsi_bus_is_locked(dsi));
if (dsi->debug_write)
- DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
- channel,
- data_type, data & 0xff, (data >> 8) & 0xff);
-
- dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4);
+ DSSDBG("dsi_vc_send_short(vc%d, dt %#x, b1 %#x, b2 %#x)\n",
+ vc, msg->type, pkt.header[1], pkt.header[2]);
- if (FLD_GET(dsi_read_reg(dsi, DSI_VC_CTRL(channel)), 16, 16)) {
+ if (FLD_GET(dsi_read_reg(dsi, DSI_VC_CTRL(vc)), 16, 16)) {
DSSERR("ERROR FIFO FULL, aborting transfer\n");
return -EINVAL;
}
- data_id = data_type | dsi->vc[channel].vc_id << 6;
-
- r = (data_id << 0) | (data << 8) | (ecc << 24);
+ r = pkt.header[3] << 24 | pkt.header[2] << 16 | pkt.header[1] << 8 |
+ pkt.header[0];
- dsi_write_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel), r);
+ dsi_write_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc), r);
return 0;
}
-static int dsi_vc_send_null(struct dsi_data *dsi, int channel)
-{
- return dsi_vc_send_long(dsi, channel, MIPI_DSI_NULL_PACKET, NULL, 0, 0);
-}
-
-static int dsi_vc_write_nosync_common(struct dsi_data *dsi, int channel,
- u8 *data, int len,
- enum dss_dsi_content_type type)
-{
- int r;
-
- if (len == 0) {
- BUG_ON(type == DSS_DSI_CONTENT_DCS);
- r = dsi_vc_send_short(dsi, channel,
- MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0);
- } else if (len == 1) {
- r = dsi_vc_send_short(dsi, channel,
- type == DSS_DSI_CONTENT_GENERIC ?
- MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
- MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
- } else if (len == 2) {
- r = dsi_vc_send_short(dsi, channel,
- type == DSS_DSI_CONTENT_GENERIC ?
- MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
- MIPI_DSI_DCS_SHORT_WRITE_PARAM,
- data[0] | (data[1] << 8), 0);
- } else {
- r = dsi_vc_send_long(dsi, channel,
- type == DSS_DSI_CONTENT_GENERIC ?
- MIPI_DSI_GENERIC_LONG_WRITE :
- MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
- }
-
- return r;
-}
-
-static int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len)
-{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
- return dsi_vc_write_nosync_common(dsi, channel, data, len,
- DSS_DSI_CONTENT_DCS);
-}
-
-static int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len)
+static int dsi_vc_send_null(struct dsi_data *dsi, int vc, int channel)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
+ const struct mipi_dsi_msg msg = {
+ .channel = channel,
+ .type = MIPI_DSI_NULL_PACKET,
+ };
- return dsi_vc_write_nosync_common(dsi, channel, data, len,
- DSS_DSI_CONTENT_GENERIC);
+ return dsi_vc_send_long(dsi, vc, &msg);
}
-static int dsi_vc_write_common(struct omap_dss_device *dssdev,
- int channel, u8 *data, int len,
- enum dss_dsi_content_type type)
+static int dsi_vc_write_common(struct omap_dss_device *dssdev, int vc,
+ const struct mipi_dsi_msg *msg)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_vc_write_nosync_common(dsi, channel, data, len, type);
- if (r)
- goto err;
-
- r = dsi_vc_send_bta_sync(dssdev, channel);
- if (r)
- goto err;
-
- /* RX_FIFO_NOT_EMPTY */
- if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
- DSSERR("rx fifo not empty after write, dumping data:\n");
- dsi_vc_flush_receive_data(dsi, channel);
- r = -EIO;
- goto err;
- }
-
- return 0;
-err:
- DSSERR("dsi_vc_write_common(ch %d, cmd 0x%02x, len %d) failed\n",
- channel, data[0], len);
- return r;
-}
-
-static int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
- int len)
-{
- return dsi_vc_write_common(dssdev, channel, data, len,
- DSS_DSI_CONTENT_DCS);
-}
-
-static int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data,
- int len)
-{
- return dsi_vc_write_common(dssdev, channel, data, len,
- DSS_DSI_CONTENT_GENERIC);
-}
+ if (mipi_dsi_packet_format_is_short(msg->type))
+ r = dsi_vc_send_short(dsi, vc, msg);
+ else
+ r = dsi_vc_send_long(dsi, vc, msg);
-static int dsi_vc_dcs_send_read_request(struct dsi_data *dsi, int channel,
- u8 dcs_cmd)
-{
- int r;
+ if (r < 0)
+ return r;
- if (dsi->debug_read)
- DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n",
- channel, dcs_cmd);
+ /*
+ * TODO: we do not always have to do the BTA sync, for example
+ * we can improve performance by setting the update window
+ * information without sending BTA sync between the commands.
+ * In that case we can return early.
+ */
- r = dsi_vc_send_short(dsi, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
+ r = dsi_vc_send_bta_sync(dssdev, vc);
if (r) {
- DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)"
- " failed\n", channel, dcs_cmd);
+ DSSERR("bta sync failed\n");
return r;
}
- return 0;
-}
-
-static int dsi_vc_generic_send_read_request(struct dsi_data *dsi, int channel,
- u8 *reqdata, int reqlen)
-{
- u16 data;
- u8 data_type;
- int r;
-
- if (dsi->debug_read)
- DSSDBG("dsi_vc_generic_send_read_request(ch %d, reqlen %d)\n",
- channel, reqlen);
-
- if (reqlen == 0) {
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
- data = 0;
- } else if (reqlen == 1) {
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
- data = reqdata[0];
- } else if (reqlen == 2) {
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
- data = reqdata[0] | (reqdata[1] << 8);
- } else {
- BUG();
- return -EINVAL;
- }
-
- r = dsi_vc_send_short(dsi, channel, data_type, data, 0);
- if (r) {
- DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)"
- " failed\n", channel, reqlen);
- return r;
+ /* RX_FIFO_NOT_EMPTY */
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
+ DSSERR("rx fifo not empty after write, dumping data:\n");
+ dsi_vc_flush_receive_data(dsi, vc);
+ return -EIO;
}
return 0;
}
-static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
+static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int vc, u8 *buf,
int buflen, enum dss_dsi_content_type type)
{
u32 val;
@@ -2869,13 +2229,13 @@ static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
int r;
/* RX_FIFO_NOT_EMPTY */
- if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20) == 0) {
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20) == 0) {
DSSERR("RX fifo empty when trying to read.\n");
r = -EIO;
goto err;
}
- val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
if (dsi->debug_read)
DSSDBG("\theader: %08x\n", val);
dt = FLD_GET(val, 5, 0);
@@ -2939,7 +2299,7 @@ static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
for (w = 0; w < len + 2;) {
int b;
val = dsi_read_reg(dsi,
- DSI_VC_SHORT_PACKET_HEADER(channel));
+ DSI_VC_SHORT_PACKET_HEADER(vc));
if (dsi->debug_read)
DSSDBG("\t\t%02x %02x %02x %02x\n",
(val >> 0) & 0xff,
@@ -2963,168 +2323,73 @@ static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
}
err:
- DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
+ DSSERR("dsi_vc_read_rx_fifo(vc %d type %s) failed\n", vc,
type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
return r;
}
-static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
- u8 *buf, int buflen)
+static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int vc,
+ const struct mipi_dsi_msg *msg)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
+ u8 cmd = ((u8 *)msg->tx_buf)[0];
int r;
- r = dsi_vc_dcs_send_read_request(dsi, channel, dcs_cmd);
+ if (dsi->debug_read)
+ DSSDBG("%s(vc %d, cmd %x)\n", __func__, vc, cmd);
+
+ r = dsi_vc_send_short(dsi, vc, msg);
if (r)
goto err;
- r = dsi_vc_send_bta_sync(dssdev, channel);
+ r = dsi_vc_send_bta_sync(dssdev, vc);
if (r)
goto err;
- r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen,
+ r = dsi_vc_read_rx_fifo(dsi, vc, msg->rx_buf, msg->rx_len,
DSS_DSI_CONTENT_DCS);
if (r < 0)
goto err;
- if (r != buflen) {
+ if (r != msg->rx_len) {
r = -EIO;
goto err;
}
return 0;
err:
- DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd);
+ DSSERR("%s(vc %d, cmd 0x%02x) failed\n", __func__, vc, cmd);
return r;
}
-static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
- u8 *reqdata, int reqlen, u8 *buf, int buflen)
+static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int vc,
+ const struct mipi_dsi_msg *msg)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_vc_generic_send_read_request(dsi, channel, reqdata, reqlen);
+ r = dsi_vc_send_short(dsi, vc, msg);
if (r)
- return r;
+ goto err;
- r = dsi_vc_send_bta_sync(dssdev, channel);
+ r = dsi_vc_send_bta_sync(dssdev, vc);
if (r)
- return r;
+ goto err;
- r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen,
+ r = dsi_vc_read_rx_fifo(dsi, vc, msg->rx_buf, msg->rx_len,
DSS_DSI_CONTENT_GENERIC);
if (r < 0)
- return r;
-
- if (r != buflen) {
- r = -EIO;
- return r;
- }
-
- return 0;
-}
-
-static int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
- u16 len)
-{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
- return dsi_vc_send_short(dsi, channel,
- MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0);
-}
-
-static int dsi_enter_ulps(struct dsi_data *dsi)
-{
- DECLARE_COMPLETION_ONSTACK(completion);
- int r, i;
- unsigned int mask;
-
- DSSDBG("Entering ULPS");
-
- WARN_ON(!dsi_bus_is_locked(dsi));
-
- WARN_ON(dsi->ulps_enabled);
-
- if (dsi->ulps_enabled)
- return 0;
-
- /* DDR_CLK_ALWAYS_ON */
- if (REG_GET(dsi, DSI_CLK_CTRL, 13, 13)) {
- dsi_if_enable(dsi, 0);
- REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 13, 13);
- dsi_if_enable(dsi, 1);
- }
-
- dsi_sync_vc(dsi, 0);
- dsi_sync_vc(dsi, 1);
- dsi_sync_vc(dsi, 2);
- dsi_sync_vc(dsi, 3);
-
- dsi_force_tx_stop_mode_io(dsi);
-
- dsi_vc_enable(dsi, 0, false);
- dsi_vc_enable(dsi, 1, false);
- dsi_vc_enable(dsi, 2, false);
- dsi_vc_enable(dsi, 3, false);
-
- if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
- DSSERR("HS busy when enabling ULPS\n");
- return -EIO;
- }
-
- if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
- DSSERR("LP busy when enabling ULPS\n");
- return -EIO;
- }
-
- r = dsi_register_isr_cio(dsi, dsi_completion_handler, &completion,
- DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
- if (r)
- return r;
-
- mask = 0;
-
- for (i = 0; i < dsi->num_lanes_supported; ++i) {
- if (dsi->lanes[i].function == DSI_LANE_UNUSED)
- continue;
- mask |= 1 << i;
- }
- /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
- /* LANEx_ULPS_SIG2 */
- REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, mask, 9, 5);
-
- /* flush posted write and wait for SCP interface to finish the write */
- dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2);
+ goto err;
- if (wait_for_completion_timeout(&completion,
- msecs_to_jiffies(1000)) == 0) {
- DSSERR("ULPS enable timeout\n");
+ if (r != msg->rx_len) {
r = -EIO;
goto err;
}
- dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion,
- DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
-
- /* Reset LANEx_ULPS_SIG2 */
- REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, 0, 9, 5);
-
- /* flush posted write and wait for SCP interface to finish the write */
- dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2);
-
- dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ULPS);
-
- dsi_if_enable(dsi, false);
-
- dsi->ulps_enabled = true;
-
return 0;
-
err:
- dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion,
- DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
+ DSSERR("%s(vc %d, reqlen %d) failed\n", __func__, vc, msg->tx_len);
return r;
}
@@ -3241,7 +2506,7 @@ static void dsi_config_vp_num_line_buffers(struct dsi_data *dsi)
int num_line_buffers;
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+ int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
const struct videomode *vm = &dsi->vm;
/*
* Don't use line buffers if width is greater than the video
@@ -3372,7 +2637,7 @@ static void dsi_config_cmd_mode_interleaving(struct dsi_data *dsi)
int tclk_trail, ths_exit, exiths_clk;
bool ddr_alwon;
const struct videomode *vm = &dsi->vm;
- int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+ int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
int ndl = dsi->num_lanes_used - 1;
int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
@@ -3500,7 +2765,7 @@ static int dsi_proto_config(struct dsi_data *dsi)
dsi_set_lp_rx_timeout(dsi, 0x1fff, true, true);
dsi_set_hs_tx_timeout(dsi, 0x1fff, true, true);
- switch (dsi_get_pixel_size(dsi->pix_fmt)) {
+ switch (mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt)) {
case 16:
buswidth = 0;
break;
@@ -3621,7 +2886,7 @@ static void dsi_proto_timings(struct dsi_data *dsi)
int window_sync = dsi->vm_timings.window_sync;
bool hsync_end;
const struct videomode *vm = &dsi->vm;
- int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+ int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
int tl, t_he, width_bytes;
hsync_end = dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE;
@@ -3659,12 +2924,9 @@ static void dsi_proto_timings(struct dsi_data *dsi)
}
}
-static int dsi_configure_pins(struct omap_dss_device *dssdev,
- const struct omap_dsi_pin_config *pin_cfg)
+static int dsi_configure_pins(struct dsi_data *dsi,
+ int num_pins, const u32 *pins)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
- int num_pins;
- const int *pins;
struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
int num_lanes;
int i;
@@ -3677,9 +2939,6 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev,
DSI_LANE_DATA4,
};
- num_pins = pin_cfg->num_pins;
- pins = pin_cfg->pins;
-
if (num_pins < 4 || num_pins > dsi->num_lanes_supported * 2
|| num_pins % 2 != 0)
return -EINVAL;
@@ -3691,15 +2950,15 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev,
for (i = 0; i < num_pins; i += 2) {
u8 lane, pol;
- int dx, dy;
+ u32 dx, dy;
dx = pins[i];
dy = pins[i + 1];
- if (dx < 0 || dx >= dsi->num_lanes_supported * 2)
+ if (dx >= dsi->num_lanes_supported * 2)
return -EINVAL;
- if (dy < 0 || dy >= dsi->num_lanes_supported * 2)
+ if (dy >= dsi->num_lanes_supported * 2)
return -EINVAL;
if (dx & 1) {
@@ -3725,86 +2984,102 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev,
return 0;
}
-static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
+static int dsi_enable_video_mode(struct dsi_data *dsi, int vc)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
- int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+ int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
u8 data_type;
u16 word_count;
- int r;
- r = dsi_display_init_dispc(dsi);
- if (r)
- return r;
+ switch (dsi->pix_fmt) {
+ case MIPI_DSI_FMT_RGB888:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ break;
+ default:
+ return -EINVAL;
+ }
- if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- switch (dsi->pix_fmt) {
- case OMAP_DSS_DSI_FMT_RGB888:
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
- break;
- case OMAP_DSS_DSI_FMT_RGB666:
- data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
- break;
- case OMAP_DSS_DSI_FMT_RGB666_PACKED:
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
- break;
- case OMAP_DSS_DSI_FMT_RGB565:
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
- break;
- default:
- r = -EINVAL;
- goto err_pix_fmt;
- }
+ dsi_if_enable(dsi, false);
+ dsi_vc_enable(dsi, vc, false);
- dsi_if_enable(dsi, false);
- dsi_vc_enable(dsi, channel, false);
+ /* MODE, 1 = video mode */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 1, 4, 4);
- /* MODE, 1 = video mode */
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 4, 4);
+ word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
- word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
+ dsi_vc_write_long_header(dsi, vc, dsi->dsidev->channel, data_type,
+ word_count, 0);
- dsi_vc_write_long_header(dsi, channel, data_type,
- word_count, 0);
+ dsi_vc_enable(dsi, vc, true);
+ dsi_if_enable(dsi, true);
- dsi_vc_enable(dsi, channel, true);
- dsi_if_enable(dsi, true);
+ return 0;
+}
+
+static void dsi_disable_video_mode(struct dsi_data *dsi, int vc)
+{
+ dsi_if_enable(dsi, false);
+ dsi_vc_enable(dsi, vc, false);
+
+ /* MODE, 0 = command mode */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 0, 4, 4);
+
+ dsi_vc_enable(dsi, vc, true);
+ dsi_if_enable(dsi, true);
+}
+
+static void dsi_enable_video_output(struct omap_dss_device *dssdev, int vc)
+{
+ struct dsi_data *dsi = to_dsi_data(dssdev);
+ int r;
+
+ r = dsi_init_dispc(dsi);
+ if (r) {
+ dev_err(dsi->dev, "failed to init dispc!\n");
+ return;
+ }
+
+ if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
+ r = dsi_enable_video_mode(dsi, vc);
+ if (r)
+ goto err_video_mode;
}
r = dss_mgr_enable(&dsi->output);
if (r)
goto err_mgr_enable;
- return 0;
+ return;
err_mgr_enable:
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
dsi_if_enable(dsi, false);
- dsi_vc_enable(dsi, channel, false);
+ dsi_vc_enable(dsi, vc, false);
}
-err_pix_fmt:
- dsi_display_uninit_dispc(dsi);
- return r;
+err_video_mode:
+ dsi_uninit_dispc(dsi);
+ dev_err(dsi->dev, "failed to enable DSI encoder!\n");
+ return;
}
-static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
+static void dsi_disable_video_output(struct omap_dss_device *dssdev, int vc)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
- if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- dsi_if_enable(dsi, false);
- dsi_vc_enable(dsi, channel, false);
-
- /* MODE, 0 = command mode */
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 0, 4, 4);
-
- dsi_vc_enable(dsi, channel, true);
- dsi_if_enable(dsi, true);
- }
+ if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE)
+ dsi_disable_video_mode(dsi, vc);
dss_mgr_disable(&dsi->output);
- dsi_display_uninit_dispc(dsi);
+ dsi_uninit_dispc(dsi);
}
static void dsi_update_screen_dispc(struct dsi_data *dsi)
@@ -3817,16 +3092,14 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi)
unsigned int packet_len;
u32 l;
int r;
- const unsigned channel = dsi->update_channel;
+ const unsigned vc = dsi->update_vc;
const unsigned int line_buf_size = dsi->line_buffer_size;
u16 w = dsi->vm.hactive;
u16 h = dsi->vm.vactive;
DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
- dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_VP);
-
- bytespp = dsi_get_pixel_size(dsi->pix_fmt) / 8;
+ bytespp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt) / 8;
bytespl = w * bytespp;
bytespf = bytespl * h;
@@ -3845,16 +3118,16 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi)
total_len += (bytespf % packet_payload) + 1;
l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
- dsi_write_reg(dsi, DSI_VC_TE(channel), l);
+ dsi_write_reg(dsi, DSI_VC_TE(vc), l);
- dsi_vc_write_long_header(dsi, channel, MIPI_DSI_DCS_LONG_WRITE,
+ dsi_vc_write_long_header(dsi, vc, dsi->dsidev->channel, MIPI_DSI_DCS_LONG_WRITE,
packet_len, 0);
if (dsi->te_enabled)
l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
else
l = FLD_MOD(l, 1, 31, 31); /* TE_START */
- dsi_write_reg(dsi, DSI_VC_TE(channel), l);
+ dsi_write_reg(dsi, DSI_VC_TE(vc), l);
/* We put SIDLEMODE to no-idle for the duration of the transfer,
* because DSS interrupts are not capable of waking up the CPU and the
@@ -3877,7 +3150,7 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi)
* for TE is longer than the timer allows */
REG_FLD_MOD(dsi, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
- dsi_vc_send_bta(dsi, channel);
+ dsi_vc_send_bta(dsi, vc);
#ifdef DSI_CATCH_MISSING_TE
mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
@@ -3902,7 +3175,7 @@ static void dsi_handle_framedone(struct dsi_data *dsi, int error)
REG_FLD_MOD(dsi, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
}
- dsi->framedone_callback(error, dsi->framedone_data);
+ dsi_bus_unlock(dsi);
if (!error)
dsi_perf_show(dsi, "DISPC");
@@ -3935,30 +3208,91 @@ static void dsi_framedone_irq_callback(void *data)
cancel_delayed_work(&dsi->framedone_timeout_work);
+ DSSDBG("Framedone received!\n");
+
dsi_handle_framedone(dsi, 0);
}
-static int dsi_update(struct omap_dss_device *dssdev, int channel,
- void (*callback)(int, void *), void *data)
+static int _dsi_update(struct dsi_data *dsi)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
dsi_perf_mark_setup(dsi);
- dsi->update_channel = channel;
-
- dsi->framedone_callback = callback;
- dsi->framedone_data = data;
-
#ifdef DSI_PERF_MEASURE
dsi->update_bytes = dsi->vm.hactive * dsi->vm.vactive *
- dsi_get_pixel_size(dsi->pix_fmt) / 8;
+ mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt) / 8;
#endif
dsi_update_screen_dispc(dsi);
return 0;
}
+static int _dsi_send_nop(struct dsi_data *dsi, int vc, int channel)
+{
+ const u8 payload[] = { MIPI_DCS_NOP };
+ const struct mipi_dsi_msg msg = {
+ .channel = channel,
+ .type = MIPI_DSI_DCS_SHORT_WRITE,
+ .tx_len = 1,
+ .tx_buf = payload,
+ };
+
+ WARN_ON(!dsi_bus_is_locked(dsi));
+
+ return _omap_dsi_host_transfer(dsi, vc, &msg);
+}
+
+static int dsi_update_channel(struct omap_dss_device *dssdev, int vc)
+{
+ struct dsi_data *dsi = to_dsi_data(dssdev);
+ int r;
+
+ dsi_bus_lock(dsi);
+
+ if (!dsi->video_enabled) {
+ r = -EIO;
+ goto err;
+ }
+
+ if (dsi->vm.hactive == 0 || dsi->vm.vactive == 0) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ DSSDBG("dsi_update_channel: %d", vc);
+
+ /*
+ * Send NOP between the frames. If we don't send something here, the
+ * updates stop working. This is probably related to DSI spec stating
+ * that the DSI host should transition to LP at least once per frame.
+ */
+ r = _dsi_send_nop(dsi, VC_CMD, dsi->dsidev->channel);
+ if (r < 0) {
+ DSSWARN("failed to send nop between frames: %d\n", r);
+ goto err;
+ }
+
+ dsi->update_vc = vc;
+
+ if (dsi->te_enabled && dsi->te_gpio) {
+ schedule_delayed_work(&dsi->te_timeout_work,
+ msecs_to_jiffies(250));
+ atomic_set(&dsi->do_ext_te_update, 1);
+ } else {
+ _dsi_update(dsi);
+ }
+
+ return 0;
+
+err:
+ dsi_bus_unlock(dsi);
+ return r;
+}
+
+static int dsi_update_all(struct omap_dss_device *dssdev)
+{
+ return dsi_update_channel(dssdev, VC_VIDEO);
+}
+
/* Display funcs */
static int dsi_configure_dispc_clocks(struct dsi_data *dsi)
@@ -3983,12 +3317,12 @@ static int dsi_configure_dispc_clocks(struct dsi_data *dsi)
return 0;
}
-static int dsi_display_init_dispc(struct dsi_data *dsi)
+static int dsi_init_dispc(struct dsi_data *dsi)
{
- enum omap_channel channel = dsi->output.dispc_channel;
+ enum omap_channel dispc_channel = dsi->output.dispc_channel;
int r;
- dss_select_lcd_clk_source(dsi->dss, channel, dsi->module_id == 0 ?
+ dss_select_lcd_clk_source(dsi->dss, dispc_channel, dsi->module_id == 0 ?
DSS_CLK_SRC_PLL1_1 :
DSS_CLK_SRC_PLL2_1);
@@ -4013,7 +3347,7 @@ static int dsi_display_init_dispc(struct dsi_data *dsi)
dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
dsi->mgr_config.video_port_width =
- dsi_get_pixel_size(dsi->pix_fmt);
+ mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
dsi->mgr_config.lcden_sig_polarity = 0;
dss_mgr_set_lcd_config(&dsi->output, &dsi->mgr_config);
@@ -4024,19 +3358,19 @@ err1:
dss_mgr_unregister_framedone_handler(&dsi->output,
dsi_framedone_irq_callback, dsi);
err:
- dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(dsi->dss, dispc_channel, DSS_CLK_SRC_FCK);
return r;
}
-static void dsi_display_uninit_dispc(struct dsi_data *dsi)
+static void dsi_uninit_dispc(struct dsi_data *dsi)
{
- enum omap_channel channel = dsi->output.dispc_channel;
+ enum omap_channel dispc_channel = dsi->output.dispc_channel;
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
dss_mgr_unregister_framedone_handler(&dsi->output,
dsi_framedone_irq_callback, dsi);
- dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(dsi->dss, dispc_channel, DSS_CLK_SRC_FCK);
}
static int dsi_configure_dsi_clocks(struct dsi_data *dsi)
@@ -4055,7 +3389,37 @@ static int dsi_configure_dsi_clocks(struct dsi_data *dsi)
return 0;
}
-static int dsi_display_init_dsi(struct dsi_data *dsi)
+static void dsi_setup_dsi_vcs(struct dsi_data *dsi)
+{
+ /* Setup VC_CMD for LP and cpu transfers */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_CMD), 0, 9, 9); /* LP */
+
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_CMD), 0, 1, 1); /* SOURCE_L4 */
+ dsi->vc[VC_CMD].source = DSI_VC_SOURCE_L4;
+
+ /* Setup VC_VIDEO for HS and dispc transfers */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 9, 9); /* HS */
+
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 1, 1); /* SOURCE_VP */
+ dsi->vc[VC_VIDEO].source = DSI_VC_SOURCE_VP;
+
+ if ((dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) &&
+ !(dsi->dsidev->mode_flags & MIPI_DSI_MODE_VIDEO))
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 30, 30); /* DCS_CMD_ENABLE */
+
+ dsi_vc_enable(dsi, VC_CMD, 1);
+ dsi_vc_enable(dsi, VC_VIDEO, 1);
+
+ dsi_if_enable(dsi, 1);
+
+ dsi_force_tx_stop_mode_io(dsi);
+
+ /* start the DDR clock by sending a NULL packet */
+ if (!(dsi->dsidev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ dsi_vc_send_null(dsi, VC_CMD, dsi->dsidev->channel);
+}
+
+static int dsi_init_dsi(struct dsi_data *dsi)
{
int r;
@@ -4097,13 +3461,7 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
if (r)
goto err3;
- /* enable interface */
- dsi_vc_enable(dsi, 0, 1);
- dsi_vc_enable(dsi, 1, 1);
- dsi_vc_enable(dsi, 2, 1);
- dsi_vc_enable(dsi, 3, 1);
- dsi_if_enable(dsi, 1);
- dsi_force_tx_stop_mode_io(dsi);
+ dsi_setup_dsi_vcs(dsi);
return 0;
err3:
@@ -4119,12 +3477,8 @@ err0:
return r;
}
-static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
- bool enter_ulps)
+static void dsi_uninit_dsi(struct dsi_data *dsi)
{
- if (enter_ulps && !dsi->ulps_enabled)
- dsi_enter_ulps(dsi);
-
/* disable interface */
dsi_if_enable(dsi, 0);
dsi_vc_enable(dsi, 0, 0);
@@ -4136,21 +3490,19 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
dsi_cio_uninit(dsi);
dss_pll_disable(&dsi->pll);
- if (disconnect_lanes) {
- regulator_disable(dsi->vdds_dsi_reg);
- dsi->vdds_dsi_enabled = false;
- }
+ regulator_disable(dsi->vdds_dsi_reg);
+ dsi->vdds_dsi_enabled = false;
}
-static void dsi_display_enable(struct omap_dss_device *dssdev)
+static void dsi_enable(struct dsi_data *dsi)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- DSSDBG("dsi_display_enable\n");
-
WARN_ON(!dsi_bus_is_locked(dsi));
+ if (WARN_ON(dsi->iface_enabled))
+ return;
+
mutex_lock(&dsi->lock);
r = dsi_runtime_get(dsi);
@@ -4159,10 +3511,12 @@ static void dsi_display_enable(struct omap_dss_device *dssdev)
_dsi_initialize_irq(dsi);
- r = dsi_display_init_dsi(dsi);
+ r = dsi_init_dsi(dsi);
if (r)
goto err_init_dsi;
+ dsi->iface_enabled = true;
+
mutex_unlock(&dsi->lock);
return;
@@ -4171,18 +3525,16 @@ err_init_dsi:
dsi_runtime_put(dsi);
err_get_dsi:
mutex_unlock(&dsi->lock);
- DSSDBG("dsi_display_enable FAILED\n");
+ DSSDBG("dsi_enable FAILED\n");
}
-static void dsi_display_disable(struct omap_dss_device *dssdev,
- bool disconnect_lanes, bool enter_ulps)
+static void dsi_disable(struct dsi_data *dsi)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
- DSSDBG("dsi_display_disable\n");
-
WARN_ON(!dsi_bus_is_locked(dsi));
+ if (WARN_ON(!dsi->iface_enabled))
+ return;
+
mutex_lock(&dsi->lock);
dsi_sync_vc(dsi, 0);
@@ -4190,18 +3542,26 @@ static void dsi_display_disable(struct omap_dss_device *dssdev,
dsi_sync_vc(dsi, 2);
dsi_sync_vc(dsi, 3);
- dsi_display_uninit_dsi(dsi, disconnect_lanes, enter_ulps);
+ dsi_uninit_dsi(dsi);
dsi_runtime_put(dsi);
+ dsi->iface_enabled = false;
+
mutex_unlock(&dsi->lock);
}
-static int dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
+static int dsi_enable_te(struct dsi_data *dsi, bool enable)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
dsi->te_enabled = enable;
+
+ if (dsi->te_gpio) {
+ if (enable)
+ enable_irq(dsi->te_irq);
+ else
+ disable_irq(dsi->te_irq);
+ }
+
return 0;
}
@@ -4351,7 +3711,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
unsigned long pck, txbyteclk;
clkin = clk_get_rate(dsi->pll.clkin);
- bitspp = dsi_get_pixel_size(cfg->pixel_format);
+ bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
ndl = dsi->num_lanes_used - 1;
/*
@@ -4384,7 +3744,7 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
{
struct dsi_data *dsi = ctx->dsi;
const struct omap_dss_dsi_config *cfg = ctx->config;
- int bitspp = dsi_get_pixel_size(cfg->pixel_format);
+ int bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
int ndl = dsi->num_lanes_used - 1;
unsigned long hsclk = ctx->dsi_cinfo.clkdco / 4;
unsigned long byteclk = hsclk / 4;
@@ -4531,7 +3891,6 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
dsi_vm->hfp_blanking_mode = 1;
dsi_vm->hbp_blanking_mode = 1;
- dsi_vm->ddr_clk_always_on = cfg->ddr_clk_always_on;
dsi_vm->window_sync = 4;
/* setup DISPC videomode */
@@ -4651,7 +4010,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
unsigned long pll_min;
unsigned long pll_max;
int ndl = dsi->num_lanes_used - 1;
- int bitspp = dsi_get_pixel_size(cfg->pixel_format);
+ int bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
unsigned long byteclk_min;
clkin = clk_get_rate(dsi->pll.clkin);
@@ -4684,39 +4043,62 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
dsi_vm_calc_pll_cb, ctx);
}
-static int dsi_set_config(struct omap_dss_device *dssdev,
- const struct omap_dss_dsi_config *config)
+static bool dsi_is_video_mode(struct omap_dss_device *dssdev)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
- struct dsi_clk_calc_ctx ctx;
+
+ return dsi->mode == OMAP_DSS_DSI_VIDEO_MODE;
+}
+
+static int __dsi_calc_config(struct dsi_data *dsi,
+ const struct drm_display_mode *mode,
+ struct dsi_clk_calc_ctx *ctx)
+{
+ struct omap_dss_dsi_config cfg = dsi->config;
+ struct videomode vm;
bool ok;
int r;
- mutex_lock(&dsi->lock);
+ drm_display_mode_to_videomode(mode, &vm);
- dsi->pix_fmt = config->pixel_format;
- dsi->mode = config->mode;
+ cfg.vm = &vm;
+ cfg.mode = dsi->mode;
+ cfg.pixel_format = dsi->pix_fmt;
- if (config->mode == OMAP_DSS_DSI_VIDEO_MODE)
- ok = dsi_vm_calc(dsi, config, &ctx);
+ if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE)
+ ok = dsi_vm_calc(dsi, &cfg, ctx);
else
- ok = dsi_cm_calc(dsi, config, &ctx);
+ ok = dsi_cm_calc(dsi, &cfg, ctx);
- if (!ok) {
- DSSERR("failed to find suitable DSI clock settings\n");
- r = -EINVAL;
- goto err;
- }
+ if (!ok)
+ return -EINVAL;
+
+ dsi_pll_calc_dsi_fck(dsi, &ctx->dsi_cinfo);
+
+ r = dsi_lp_clock_calc(ctx->dsi_cinfo.clkout[HSDIV_DSI],
+ cfg.lp_clk_min, cfg.lp_clk_max, &ctx->lp_cinfo);
+ if (r)
+ return r;
+
+ return 0;
+}
- dsi_pll_calc_dsi_fck(dsi, &ctx.dsi_cinfo);
+static int dsi_set_config(struct omap_dss_device *dssdev,
+ const struct drm_display_mode *mode)
+{
+ struct dsi_data *dsi = to_dsi_data(dssdev);
+ struct dsi_clk_calc_ctx ctx;
+ int r;
- r = dsi_lp_clock_calc(ctx.dsi_cinfo.clkout[HSDIV_DSI],
- config->lp_clk_min, config->lp_clk_max, &dsi->user_lp_cinfo);
+ mutex_lock(&dsi->lock);
+
+ r = __dsi_calc_config(dsi, mode, &ctx);
if (r) {
- DSSERR("failed to find suitable DSI LP clock settings\n");
+ DSSERR("failed to find suitable DSI clock settings\n");
goto err;
}
+ dsi->user_lp_cinfo = ctx.lp_cinfo;
dsi->user_dsi_cinfo = ctx.dsi_cinfo;
dsi->user_dispc_cinfo = ctx.dispc_cinfo;
@@ -4757,12 +4139,12 @@ err:
}
/*
- * Return a hardcoded channel for the DSI output. This should work for
+ * Return a hardcoded dispc channel for the DSI output. This should work for
* current use cases, but this can be later expanded to either resolve
* the channel in some more dynamic manner, or get the channel as a user
* parameter.
*/
-static enum omap_channel dsi_get_channel(struct dsi_data *dsi)
+static enum omap_channel dsi_get_dispc_channel(struct dsi_data *dsi)
{
switch (dsi->data->model) {
case DSI_MODEL_OMAP3:
@@ -4796,59 +4178,75 @@ static enum omap_channel dsi_get_channel(struct dsi_data *dsi)
}
}
-static int dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
+static ssize_t _omap_dsi_host_transfer(struct dsi_data *dsi, int vc,
+ const struct mipi_dsi_msg *msg)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
- int i;
+ struct omap_dss_device *dssdev = &dsi->output;
+ int r;
- for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
- if (!dsi->vc[i].dssdev) {
- dsi->vc[i].dssdev = dssdev;
- *channel = i;
- return 0;
- }
+ dsi_vc_enable_hs(dssdev, vc, !(msg->flags & MIPI_DSI_MSG_USE_LPM));
+
+ switch (msg->type) {
+ case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+ case MIPI_DSI_GENERIC_LONG_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_DCS_LONG_WRITE:
+ case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
+ case MIPI_DSI_NULL_PACKET:
+ r = dsi_vc_write_common(dssdev, vc, msg);
+ break;
+ case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+ r = dsi_vc_generic_read(dssdev, vc, msg);
+ break;
+ case MIPI_DSI_DCS_READ:
+ r = dsi_vc_dcs_read(dssdev, vc, msg);
+ break;
+ default:
+ r = -EINVAL;
+ break;
}
- DSSERR("cannot get VC for display %s", dssdev->name);
- return -ENOSPC;
-}
-
-static int dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
-{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
- if (vc_id < 0 || vc_id > 3) {
- DSSERR("VC ID out of range\n");
- return -EINVAL;
- }
+ if (r < 0)
+ return r;
- if (channel < 0 || channel > 3) {
- DSSERR("Virtual Channel out of range\n");
- return -EINVAL;
- }
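+ /*
+ * Track the panel's tear-enable state from the DCS stream so the update
+ * path knows whether to wait for the external TE signal or to start the
+ * transfer immediately.
+ */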
+ if (msg->type == MIPI_DSI_DCS_SHORT_WRITE ||
+ msg->type == MIPI_DSI_DCS_SHORT_WRITE_PARAM) {
+ u8 cmd = ((u8 *)msg->tx_buf)[0];
- if (dsi->vc[channel].dssdev != dssdev) {
- DSSERR("Virtual Channel not allocated to display %s\n",
- dssdev->name);
- return -EINVAL;
+ if (cmd == MIPI_DCS_SET_TEAR_OFF)
+ dsi_enable_te(dsi, false);
+ else if (cmd == MIPI_DCS_SET_TEAR_ON)
+ dsi_enable_te(dsi, true);
}
- dsi->vc[channel].vc_id = vc_id;
-
return 0;
}
-static void dsi_release_vc(struct omap_dss_device *dssdev, int channel)
+static ssize_t omap_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
+ struct dsi_data *dsi = host_to_omap(host);
+ int r;
+ int vc = VC_CMD;
- if ((channel >= 0 && channel <= 3) &&
- dsi->vc[channel].dssdev == dssdev) {
- dsi->vc[channel].dssdev = NULL;
- dsi->vc[channel].vc_id = 0;
+ dsi_bus_lock(dsi);
+
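+ /*
+ * Enable the interface on demand for command transfers and schedule a
+ * deferred disable so the link is powered down again if neither further
+ * transfers nor video output follow.
+ */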
+ if (!dsi->iface_enabled) {
+ dsi_enable(dsi);
+ schedule_delayed_work(&dsi->dsi_disable_work, msecs_to_jiffies(2000));
}
-}
+ r = _omap_dsi_host_transfer(dsi, vc, msg);
+
+ dsi_bus_unlock(dsi);
+
+ return r;
+}
static int dsi_get_clocks(struct dsi_data *dsi)
{
@@ -4865,57 +4263,167 @@ static int dsi_get_clocks(struct dsi_data *dsi)
return 0;
}
-static int dsi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+static const struct omapdss_dsi_ops dsi_ops = {
+ .update = dsi_update_all,
+ .is_video_mode = dsi_is_video_mode,
+};
+
+static irqreturn_t omap_dsi_te_irq_handler(int irq, void *dev_id)
{
- return omapdss_device_connect(dst->dss, dst, dst->next);
+ struct dsi_data *dsi = (struct dsi_data *)dev_id;
+ int old;
+
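+ /*
+ * Consume the pending-update flag atomically so that only one of the TE
+ * interrupt and the 250ms timeout worker triggers the update.
+ */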
+ old = atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0);
+ if (old) {
+ cancel_delayed_work(&dsi->te_timeout_work);
+ _dsi_update(dsi);
+ }
+
+ return IRQ_HANDLED;
}
-static void dsi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+static void omap_dsi_te_timeout_work_callback(struct work_struct *work)
{
- omapdss_device_disconnect(dst, dst->next);
+ struct dsi_data *dsi =
+ container_of(work, struct dsi_data, te_timeout_work.work);
+ int old;
+
+ old = atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0);
+ if (old) {
+ dev_err(dsi->dev, "TE not received for 250ms!\n");
+ _dsi_update(dsi);
+ }
}
-static const struct omap_dss_device_ops dsi_ops = {
- .connect = dsi_connect,
- .disconnect = dsi_disconnect,
- .enable = dsi_display_enable,
+static int omap_dsi_register_te_irq(struct dsi_data *dsi,
+ struct mipi_dsi_device *client)
+{
+ int err;
+ int te_irq;
- .dsi = {
- .bus_lock = dsi_bus_lock,
- .bus_unlock = dsi_bus_unlock,
+ dsi->te_gpio = gpiod_get(&client->dev, "te-gpios", GPIOD_IN);
+ if (IS_ERR(dsi->te_gpio)) {
+ err = PTR_ERR(dsi->te_gpio);
- .disable = dsi_display_disable,
+ if (err == -ENOENT) {
+ dsi->te_gpio = NULL;
+ return 0;
+ }
- .enable_hs = dsi_vc_enable_hs,
+ dev_err(dsi->dev, "Could not get TE gpio: %d\n", err);
+ return err;
+ }
+
+ te_irq = gpiod_to_irq(dsi->te_gpio);
+ if (te_irq < 0) {
+ gpiod_put(dsi->te_gpio);
+ dsi->te_gpio = NULL;
+ return -EINVAL;
+ }
+
+ dsi->te_irq = te_irq;
- .configure_pins = dsi_configure_pins,
- .set_config = dsi_set_config,
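+ /*
+ * Keep the TE interrupt masked until tearing is enabled via DCS
+ * SET_TEAR_ON; dsi_enable_te() unmasks and masks it as needed.
+ */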
+ irq_set_status_flags(te_irq, IRQ_NOAUTOEN);
- .enable_video_output = dsi_enable_video_output,
- .disable_video_output = dsi_disable_video_output,
+ err = request_threaded_irq(te_irq, NULL, omap_dsi_te_irq_handler,
+ IRQF_TRIGGER_RISING, "TE", dsi);
+ if (err) {
+ dev_err(dsi->dev, "request irq failed with %d\n", err);
+ gpiod_put(dsi->te_gpio);
+ dsi->te_gpio = NULL;
+ return err;
+ }
+
+ INIT_DEFERRABLE_WORK(&dsi->te_timeout_work,
+ omap_dsi_te_timeout_work_callback);
- .update = dsi_update,
+ dev_dbg(dsi->dev, "Using GPIO TE\n");
- .enable_te = dsi_enable_te,
+ return 0;
+}
- .request_vc = dsi_request_vc,
- .set_vc_id = dsi_set_vc_id,
- .release_vc = dsi_release_vc,
+static void omap_dsi_unregister_te_irq(struct dsi_data *dsi)
+{
+ if (dsi->te_gpio) {
+ free_irq(dsi->te_irq, dsi);
+ cancel_delayed_work(&dsi->te_timeout_work);
+ gpiod_put(dsi->te_gpio);
+ dsi->te_gpio = NULL;
+ }
+}
- .dcs_write = dsi_vc_dcs_write,
- .dcs_write_nosync = dsi_vc_dcs_write_nosync,
- .dcs_read = dsi_vc_dcs_read,
+static int omap_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *client)
+{
+ struct dsi_data *dsi = host_to_omap(host);
+ int r;
- .gen_write = dsi_vc_generic_write,
- .gen_write_nosync = dsi_vc_generic_write_nosync,
- .gen_read = dsi_vc_generic_read,
+ if (dsi->dsidev) {
+ DSSERR("dsi client already attached\n");
+ return -EBUSY;
+ }
- .bta_sync = dsi_vc_send_bta_sync,
+ if (mipi_dsi_pixel_format_to_bpp(client->format) < 0) {
+ DSSERR("invalid pixel format\n");
+ return -EINVAL;
+ }
- .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
- },
+ atomic_set(&dsi->do_ext_te_update, 0);
+
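+ /*
+ * The external TE GPIO is only relevant for manual updates, so register
+ * its interrupt for command-mode clients only.
+ */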
+ if (client->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ dsi->mode = OMAP_DSS_DSI_VIDEO_MODE;
+ } else {
+ r = omap_dsi_register_te_irq(dsi, client);
+ if (r)
+ return r;
+
+ dsi->mode = OMAP_DSS_DSI_CMD_MODE;
+ }
+
+ dsi->dsidev = client;
+ dsi->pix_fmt = client->format;
+
+ dsi->config.hs_clk_min = 150000000; // TODO: get from client?
+ dsi->config.hs_clk_max = client->hs_rate;
+ dsi->config.lp_clk_min = 7000000; // TODO: get from client?
+ dsi->config.lp_clk_max = client->lp_rate;
+
+ if (client->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ dsi->config.trans_mode = OMAP_DSS_DSI_BURST_MODE;
+ else if (client->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ dsi->config.trans_mode = OMAP_DSS_DSI_PULSE_MODE;
+ else
+ dsi->config.trans_mode = OMAP_DSS_DSI_EVENT_MODE;
+
+ return 0;
+}
+
+static int omap_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *client)
+{
+ struct dsi_data *dsi = host_to_omap(host);
+
+ if (WARN_ON(dsi->dsidev != client))
+ return -EINVAL;
+
+ cancel_delayed_work_sync(&dsi->dsi_disable_work);
+
+ dsi_bus_lock(dsi);
+
+ if (dsi->iface_enabled)
+ dsi_disable(dsi);
+
+ dsi_bus_unlock(dsi);
+
+ omap_dsi_unregister_te_irq(dsi);
+ dsi->dsidev = NULL;
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops omap_dsi_host_ops = {
+ .attach = omap_dsi_host_attach,
+ .detach = omap_dsi_host_detach,
+ .transfer = omap_dsi_host_transfer,
};
/* -----------------------------------------------------------------------------
@@ -5097,6 +4605,106 @@ static const struct component_ops dsi_component_ops = {
};
/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, dsi->output.next_bridge,
+ bridge, flags);
+}
+
+static enum drm_mode_status
+dsi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+ struct dsi_clk_calc_ctx ctx;
+ int r;
+
+ mutex_lock(&dsi->lock);
+ r = __dsi_calc_config(dsi, mode, &ctx);
+ mutex_unlock(&dsi->lock);
+
+ return r ? MODE_CLOCK_RANGE : MODE_OK;
+}
+
+static void dsi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+
+ dsi_set_config(&dsi->output, adjusted_mode);
+}
+
+static void dsi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+ struct omap_dss_device *dssdev = &dsi->output;
+
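+ /*
+ * A command transfer may have scheduled a deferred interface disable;
+ * cancel it so the interface stays up for video output.
+ */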
+ cancel_delayed_work_sync(&dsi->dsi_disable_work);
+
+ dsi_bus_lock(dsi);
+
+ if (!dsi->iface_enabled)
+ dsi_enable(dsi);
+
+ dsi_enable_video_output(dssdev, VC_VIDEO);
+
+ dsi->video_enabled = true;
+
+ dsi_bus_unlock(dsi);
+}
+
+static void dsi_bridge_disable(struct drm_bridge *bridge)
+{
+ struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+ struct omap_dss_device *dssdev = &dsi->output;
+
+ cancel_delayed_work_sync(&dsi->dsi_disable_work);
+
+ dsi_bus_lock(dsi);
+
+ dsi->video_enabled = false;
+
+ dsi_disable_video_output(dssdev, VC_VIDEO);
+
+ dsi_disable(dsi);
+
+ dsi_bus_unlock(dsi);
+}
+
+static const struct drm_bridge_funcs dsi_bridge_funcs = {
+ .attach = dsi_bridge_attach,
+ .mode_valid = dsi_bridge_mode_valid,
+ .mode_set = dsi_bridge_mode_set,
+ .enable = dsi_bridge_enable,
+ .disable = dsi_bridge_disable,
+};
+
+static void dsi_bridge_init(struct dsi_data *dsi)
+{
+ dsi->bridge.funcs = &dsi_bridge_funcs;
+ dsi->bridge.of_node = dsi->host.dev->of_node;
+ dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
+
+ drm_bridge_add(&dsi->bridge);
+}
+
+static void dsi_bridge_cleanup(struct dsi_data *dsi)
+{
+ drm_bridge_remove(&dsi->bridge);
+}
+
+/* -----------------------------------------------------------------------------
* Probe & Remove, Suspend & Resume
*/
@@ -5105,23 +4713,26 @@ static int dsi_init_output(struct dsi_data *dsi)
struct omap_dss_device *out = &dsi->output;
int r;
+ dsi_bridge_init(dsi);
+
out->dev = dsi->dev;
out->id = dsi->module_id == 0 ?
OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
out->type = OMAP_DISPLAY_TYPE_DSI;
out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
- out->dispc_channel = dsi_get_channel(dsi);
- out->ops = &dsi_ops;
- out->owner = THIS_MODULE;
+ out->dispc_channel = dsi_get_dispc_channel(dsi);
+ out->dsi_ops = &dsi_ops;
out->of_port = 0;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE
| DRM_BUS_FLAG_DE_HIGH
| DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
- r = omapdss_device_init_output(out, NULL);
- if (r < 0)
+ r = omapdss_device_init_output(out, &dsi->bridge);
+ if (r < 0) {
+ dsi_bridge_cleanup(dsi);
return r;
+ }
omapdss_device_register(out);
@@ -5134,6 +4745,7 @@ static void dsi_uninit_output(struct dsi_data *dsi)
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
+ dsi_bridge_cleanup(dsi);
}
static int dsi_probe_of(struct dsi_data *dsi)
@@ -5142,9 +4754,8 @@ static int dsi_probe_of(struct dsi_data *dsi)
struct property *prop;
u32 lane_arr[10];
int len, num_pins;
- int r, i;
+ int r;
struct device_node *ep;
- struct omap_dsi_pin_config pin_cfg;
ep = of_graph_get_endpoint_by_regs(node, 0, 0);
if (!ep)
@@ -5172,11 +4783,7 @@ static int dsi_probe_of(struct dsi_data *dsi)
goto err;
}
- pin_cfg.num_pins = num_pins;
- for (i = 0; i < num_pins; ++i)
- pin_cfg.pins[i] = (int)lane_arr[i];
-
- r = dsi_configure_pins(&dsi->output, &pin_cfg);
+ r = dsi_configure_pins(dsi, num_pins, lane_arr);
if (r) {
dev_err(dsi->dev, "failed to configure pins");
goto err;
@@ -5256,6 +4863,18 @@ static const struct soc_device_attribute dsi_soc_devices[] = {
{ /* sentinel */ }
};
+static void omap_dsi_disable_work_callback(struct work_struct *work)
+{
+ struct dsi_data *dsi = container_of(work, struct dsi_data, dsi_disable_work.work);
+
+ dsi_bus_lock(dsi);
+
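+ /* Only power down if no video output was enabled in the meantime. */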
+ if (dsi->iface_enabled && !dsi->video_enabled)
+ dsi_disable(dsi);
+
+ dsi_bus_unlock(dsi);
+}
+
static int dsi_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *soc;
@@ -5289,6 +4908,8 @@ static int dsi_probe(struct platform_device *pdev)
INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work,
dsi_framedone_timeout_work_callback);
+ INIT_DEFERRABLE_WORK(&dsi->dsi_disable_work, omap_dsi_disable_work_callback);
+
#ifdef DSI_CATCH_MISSING_TE
timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
#endif
@@ -5364,11 +4985,8 @@ static int dsi_probe(struct platform_device *pdev)
}
/* DSI VCs initialization */
- for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
+ for (i = 0; i < ARRAY_SIZE(dsi->vc); i++)
dsi->vc[i].source = DSI_VC_SOURCE_L4;
- dsi->vc[i].dssdev = NULL;
- dsi->vc[i].vc_id = 0;
- }
r = dsi_get_clocks(dsi);
if (r)
@@ -5387,21 +5005,24 @@ static int dsi_probe(struct platform_device *pdev)
dsi->num_lanes_supported = 3;
}
- r = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ dsi->host.ops = &omap_dsi_host_ops;
+ dsi->host.dev = &pdev->dev;
+
+ r = dsi_probe_of(dsi);
if (r) {
- DSSERR("Failed to populate DSI child devices: %d\n", r);
+ DSSERR("Invalid DSI DT data\n");
+ goto err_pm_disable;
+ }
+
+ r = mipi_dsi_host_register(&dsi->host);
+ if (r < 0) {
+ dev_err(&pdev->dev, "failed to register DSI host: %d\n", r);
goto err_pm_disable;
}
r = dsi_init_output(dsi);
if (r)
- goto err_of_depopulate;
-
- r = dsi_probe_of(dsi);
- if (r) {
- DSSERR("Invalid DSI DT data\n");
- goto err_uninit_output;
- }
+ goto err_dsi_host_unregister;
r = component_add(&pdev->dev, &dsi_component_ops);
if (r)
@@ -5411,8 +5032,8 @@ static int dsi_probe(struct platform_device *pdev)
err_uninit_output:
dsi_uninit_output(dsi);
-err_of_depopulate:
- of_platform_depopulate(dev);
+err_dsi_host_unregister:
+ mipi_dsi_host_unregister(&dsi->host);
err_pm_disable:
pm_runtime_disable(dev);
return r;
@@ -5426,7 +5047,7 @@ static int dsi_remove(struct platform_device *pdev)
dsi_uninit_output(dsi);
- of_platform_depopulate(&pdev->dev);
+ mipi_dsi_host_unregister(&dsi->host);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.h b/drivers/gpu/drm/omapdrm/dss/dsi.h
new file mode 100644
index 000000000000..601707c0ecc4
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.h
@@ -0,0 +1,456 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#ifndef __OMAP_DRM_DSS_DSI_H
+#define __OMAP_DRM_DSS_DSI_H
+
+#include <drm/drm_mipi_dsi.h>
+
+struct dsi_reg {
+ u16 module;
+ u16 idx;
+};
+
+#define DSI_REG(mod, idx) ((const struct dsi_reg) { mod, idx })
+
+/* DSI Protocol Engine */
+
+#define DSI_PROTO 0
+#define DSI_PROTO_SZ 0x200
+
+#define DSI_REVISION DSI_REG(DSI_PROTO, 0x0000)
+#define DSI_SYSCONFIG DSI_REG(DSI_PROTO, 0x0010)
+#define DSI_SYSSTATUS DSI_REG(DSI_PROTO, 0x0014)
+#define DSI_IRQSTATUS DSI_REG(DSI_PROTO, 0x0018)
+#define DSI_IRQENABLE DSI_REG(DSI_PROTO, 0x001C)
+#define DSI_CTRL DSI_REG(DSI_PROTO, 0x0040)
+#define DSI_GNQ DSI_REG(DSI_PROTO, 0x0044)
+#define DSI_COMPLEXIO_CFG1 DSI_REG(DSI_PROTO, 0x0048)
+#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(DSI_PROTO, 0x004C)
+#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(DSI_PROTO, 0x0050)
+#define DSI_CLK_CTRL DSI_REG(DSI_PROTO, 0x0054)
+#define DSI_TIMING1 DSI_REG(DSI_PROTO, 0x0058)
+#define DSI_TIMING2 DSI_REG(DSI_PROTO, 0x005C)
+#define DSI_VM_TIMING1 DSI_REG(DSI_PROTO, 0x0060)
+#define DSI_VM_TIMING2 DSI_REG(DSI_PROTO, 0x0064)
+#define DSI_VM_TIMING3 DSI_REG(DSI_PROTO, 0x0068)
+#define DSI_CLK_TIMING DSI_REG(DSI_PROTO, 0x006C)
+#define DSI_TX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0070)
+#define DSI_RX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0074)
+#define DSI_COMPLEXIO_CFG2 DSI_REG(DSI_PROTO, 0x0078)
+#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(DSI_PROTO, 0x007C)
+#define DSI_VM_TIMING4 DSI_REG(DSI_PROTO, 0x0080)
+#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(DSI_PROTO, 0x0084)
+#define DSI_VM_TIMING5 DSI_REG(DSI_PROTO, 0x0088)
+#define DSI_VM_TIMING6 DSI_REG(DSI_PROTO, 0x008C)
+#define DSI_VM_TIMING7 DSI_REG(DSI_PROTO, 0x0090)
+#define DSI_STOPCLK_TIMING DSI_REG(DSI_PROTO, 0x0094)
+#define DSI_VC_CTRL(n) DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20))
+#define DSI_VC_TE(n) DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20))
+#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20))
+#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(DSI_PROTO, 0x010C + (n * 0x20))
+#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20))
+#define DSI_VC_IRQSTATUS(n) DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20))
+#define DSI_VC_IRQENABLE(n) DSI_REG(DSI_PROTO, 0x011C + (n * 0x20))
+
+/* DSIPHY_SCP */
+
+#define DSI_PHY 1
+#define DSI_PHY_OFFSET 0x200
+#define DSI_PHY_SZ 0x40
+
+#define DSI_DSIPHY_CFG0 DSI_REG(DSI_PHY, 0x0000)
+#define DSI_DSIPHY_CFG1 DSI_REG(DSI_PHY, 0x0004)
+#define DSI_DSIPHY_CFG2 DSI_REG(DSI_PHY, 0x0008)
+#define DSI_DSIPHY_CFG5 DSI_REG(DSI_PHY, 0x0014)
+#define DSI_DSIPHY_CFG10 DSI_REG(DSI_PHY, 0x0028)
+
+/* DSI_PLL_CTRL_SCP */
+
+#define DSI_PLL 2
+#define DSI_PLL_OFFSET 0x300
+#define DSI_PLL_SZ 0x20
+
+#define DSI_PLL_CONTROL DSI_REG(DSI_PLL, 0x0000)
+#define DSI_PLL_STATUS DSI_REG(DSI_PLL, 0x0004)
+#define DSI_PLL_GO DSI_REG(DSI_PLL, 0x0008)
+#define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 0x000C)
+#define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010)
+
+/* Global interrupts */
+#define DSI_IRQ_VC0 (1 << 0)
+#define DSI_IRQ_VC1 (1 << 1)
+#define DSI_IRQ_VC2 (1 << 2)
+#define DSI_IRQ_VC3 (1 << 3)
+#define DSI_IRQ_WAKEUP (1 << 4)
+#define DSI_IRQ_RESYNC (1 << 5)
+#define DSI_IRQ_PLL_LOCK (1 << 7)
+#define DSI_IRQ_PLL_UNLOCK (1 << 8)
+#define DSI_IRQ_PLL_RECALL (1 << 9)
+#define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
+#define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
+#define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
+#define DSI_IRQ_TE_TRIGGER (1 << 16)
+#define DSI_IRQ_ACK_TRIGGER (1 << 17)
+#define DSI_IRQ_SYNC_LOST (1 << 18)
+#define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
+#define DSI_IRQ_TA_TIMEOUT (1 << 20)
+#define DSI_IRQ_ERROR_MASK \
+ (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
+ DSI_IRQ_TA_TIMEOUT)
+#define DSI_IRQ_CHANNEL_MASK 0xf
+
+/* Virtual channel interrupts */
+#define DSI_VC_IRQ_CS (1 << 0)
+#define DSI_VC_IRQ_ECC_CORR (1 << 1)
+#define DSI_VC_IRQ_PACKET_SENT (1 << 2)
+#define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
+#define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
+#define DSI_VC_IRQ_BTA (1 << 5)
+#define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
+#define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
+#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
+#define DSI_VC_IRQ_ERROR_MASK \
+ (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
+ DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
+ DSI_VC_IRQ_FIFO_TX_UDF)
+
+/* ComplexIO interrupts */
+#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
+#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
+#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
+#define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3)
+#define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4)
+#define DSI_CIO_IRQ_ERRESC1 (1 << 5)
+#define DSI_CIO_IRQ_ERRESC2 (1 << 6)
+#define DSI_CIO_IRQ_ERRESC3 (1 << 7)
+#define DSI_CIO_IRQ_ERRESC4 (1 << 8)
+#define DSI_CIO_IRQ_ERRESC5 (1 << 9)
+#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
+#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
+#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
+#define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13)
+#define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14)
+#define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
+#define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
+#define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
+#define DSI_CIO_IRQ_STATEULPS4 (1 << 18)
+#define DSI_CIO_IRQ_STATEULPS5 (1 << 19)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29)
+#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
+#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
+#define DSI_CIO_IRQ_ERROR_MASK \
+ (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
+ DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
+ DSI_CIO_IRQ_ERRSYNCESC5 | \
+ DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
+ DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
+ DSI_CIO_IRQ_ERRESC5 | \
+ DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
+ DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
+ DSI_CIO_IRQ_ERRCONTROL5 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
+
+enum omap_dss_dsi_mode {
+ OMAP_DSS_DSI_CMD_MODE = 0,
+ OMAP_DSS_DSI_VIDEO_MODE,
+};
+
+enum omap_dss_dsi_trans_mode {
+ /* Sync Pulses: both sync start and end packets sent */
+ OMAP_DSS_DSI_PULSE_MODE,
+ /* Sync Events: only sync start packets sent */
+ OMAP_DSS_DSI_EVENT_MODE,
+ /* Burst: only sync start packets sent, pixels are time compressed */
+ OMAP_DSS_DSI_BURST_MODE,
+};
+
+struct omap_dss_dsi_videomode_timings {
+ unsigned long hsclk;
+
+ unsigned int ndl;
+ unsigned int bitspp;
+
+ /* pixels */
+ u16 hact;
+ /* lines */
+ u16 vact;
+
+ /* DSI video mode blanking data */
+ /* Unit: byte clock cycles */
+ u16 hss;
+ u16 hsa;
+ u16 hse;
+ u16 hfp;
+ u16 hbp;
+ /* Unit: line clocks */
+ u16 vsa;
+ u16 vfp;
+ u16 vbp;
+
+ /* DSI blanking modes */
+ int blanking_mode;
+ int hsa_blanking_mode;
+ int hbp_blanking_mode;
+ int hfp_blanking_mode;
+
+ enum omap_dss_dsi_trans_mode trans_mode;
+
+ int window_sync;
+};
+
+struct omap_dss_dsi_config {
+ enum omap_dss_dsi_mode mode;
+ enum mipi_dsi_pixel_format pixel_format;
+ const struct videomode *vm;
+
+ unsigned long hs_clk_min, hs_clk_max;
+ unsigned long lp_clk_min, lp_clk_max;
+
+ enum omap_dss_dsi_trans_mode trans_mode;
+};
+
+/* DSI PLL HSDIV indices */
+#define HSDIV_DISPC 0
+#define HSDIV_DSI 1
+
+#define DSI_MAX_NR_ISRS 2
+#define DSI_MAX_NR_LANES 5
+
+enum dsi_model {
+ DSI_MODEL_OMAP3,
+ DSI_MODEL_OMAP4,
+ DSI_MODEL_OMAP5,
+};
+
+enum dsi_lane_function {
+ DSI_LANE_UNUSED = 0,
+ DSI_LANE_CLK,
+ DSI_LANE_DATA1,
+ DSI_LANE_DATA2,
+ DSI_LANE_DATA3,
+ DSI_LANE_DATA4,
+};
+
+struct dsi_lane_config {
+ enum dsi_lane_function function;
+ u8 polarity;
+};
+
+typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
+
+struct dsi_isr_data {
+ omap_dsi_isr_t isr;
+ void *arg;
+ u32 mask;
+};
+
+enum fifo_size {
+ DSI_FIFO_SIZE_0 = 0,
+ DSI_FIFO_SIZE_32 = 1,
+ DSI_FIFO_SIZE_64 = 2,
+ DSI_FIFO_SIZE_96 = 3,
+ DSI_FIFO_SIZE_128 = 4,
+};
+
+enum dsi_vc_source {
+ DSI_VC_SOURCE_L4 = 0,
+ DSI_VC_SOURCE_VP,
+};
+
+struct dsi_irq_stats {
+ unsigned long last_reset;
+ unsigned int irq_count;
+ unsigned int dsi_irqs[32];
+ unsigned int vc_irqs[4][32];
+ unsigned int cio_irqs[32];
+};
+
+struct dsi_isr_tables {
+ struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
+ struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
+ struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
+};
+
+struct dsi_lp_clock_info {
+ unsigned long lp_clk;
+ u16 lp_clk_div;
+};
+
+struct dsi_clk_calc_ctx {
+ struct dsi_data *dsi;
+ struct dss_pll *pll;
+
+ /* inputs */
+
+ const struct omap_dss_dsi_config *config;
+
+ unsigned long req_pck_min, req_pck_nom, req_pck_max;
+
+ /* outputs */
+
+ struct dss_pll_clock_info dsi_cinfo;
+ struct dispc_clock_info dispc_cinfo;
+ struct dsi_lp_clock_info lp_cinfo;
+
+ struct videomode vm;
+ struct omap_dss_dsi_videomode_timings dsi_vm;
+};
+
+struct dsi_module_id_data {
+ u32 address;
+ int id;
+};
+
+enum dsi_quirks {
+ DSI_QUIRK_PLL_PWR_BUG = (1 << 0), /* DSI-PLL power command 0x3 is not working */
+ DSI_QUIRK_DCS_CMD_CONFIG_VC = (1 << 1),
+ DSI_QUIRK_VC_OCP_WIDTH = (1 << 2),
+ DSI_QUIRK_REVERSE_TXCLKESC = (1 << 3),
+ DSI_QUIRK_GNQ = (1 << 4),
+ DSI_QUIRK_PHY_DCC = (1 << 5),
+};
+
+struct dsi_of_data {
+ enum dsi_model model;
+ const struct dss_pll_hw *pll_hw;
+ const struct dsi_module_id_data *modules;
+ unsigned int max_fck_freq;
+ unsigned int max_pll_lpdiv;
+ enum dsi_quirks quirks;
+};
+
+struct dsi_data {
+ struct device *dev;
+ void __iomem *proto_base;
+ void __iomem *phy_base;
+ void __iomem *pll_base;
+
+ const struct dsi_of_data *data;
+ int module_id;
+
+ int irq;
+
+ bool is_enabled;
+
+ struct clk *dss_clk;
+ struct regmap *syscon;
+ struct dss_device *dss;
+
+ struct mipi_dsi_host host;
+
+ struct dispc_clock_info user_dispc_cinfo;
+ struct dss_pll_clock_info user_dsi_cinfo;
+
+ struct dsi_lp_clock_info user_lp_cinfo;
+ struct dsi_lp_clock_info current_lp_cinfo;
+
+ struct dss_pll pll;
+
+ bool vdds_dsi_enabled;
+ struct regulator *vdds_dsi_reg;
+
+ struct mipi_dsi_device *dsidev;
+
+ struct {
+ enum dsi_vc_source source;
+ enum fifo_size tx_fifo_size;
+ enum fifo_size rx_fifo_size;
+ } vc[4];
+
+ struct mutex lock;
+ struct semaphore bus_lock;
+
+ spinlock_t irq_lock;
+ struct dsi_isr_tables isr_tables;
+ /* space for a copy used by the interrupt handler */
+ struct dsi_isr_tables isr_tables_copy;
+
+ int update_vc;
+#ifdef DSI_PERF_MEASURE
+ unsigned int update_bytes;
+#endif
+
+ /* external TE GPIO */
+ struct gpio_desc *te_gpio;
+ int te_irq;
+ struct delayed_work te_timeout_work;
+ atomic_t do_ext_te_update;
+
+ bool te_enabled;
+ bool iface_enabled;
+ bool video_enabled;
+
+ struct delayed_work framedone_timeout_work;
+
+#ifdef DSI_CATCH_MISSING_TE
+ struct timer_list te_timer;
+#endif
+
+ unsigned long cache_req_pck;
+ unsigned long cache_clk_freq;
+ struct dss_pll_clock_info cache_cinfo;
+
+ u32 errors;
+ spinlock_t errors_lock;
+#ifdef DSI_PERF_MEASURE
+ ktime_t perf_setup_time;
+ ktime_t perf_start_time;
+#endif
+ int debug_read;
+ int debug_write;
+ struct {
+ struct dss_debugfs_entry *irqs;
+ struct dss_debugfs_entry *regs;
+ struct dss_debugfs_entry *clks;
+ } debugfs;
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spinlock_t irq_stats_lock;
+ struct dsi_irq_stats irq_stats;
+#endif
+
+ unsigned int num_lanes_supported;
+ unsigned int line_buffer_size;
+
+ struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
+ unsigned int num_lanes_used;
+
+ unsigned int scp_clk_refcount;
+
+ struct omap_dss_dsi_config config;
+
+ struct dss_lcd_mgr_config mgr_config;
+ struct videomode vm;
+ enum mipi_dsi_pixel_format pix_fmt;
+ enum omap_dss_dsi_mode mode;
+ struct omap_dss_dsi_videomode_timings vm_timings;
+
+ struct omap_dss_device output;
+ struct drm_bridge bridge;
+
+ struct delayed_work dsi_disable_work;
+};
+
+struct dsi_packet_sent_handler_data {
+ struct dsi_data *dsi;
+ struct completion *completion;
+};
+
+#endif /* __OMAP_DRM_DSS_DSI_H */
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index d7b2f5bcac16..d6a5862b4dbf 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1308,6 +1308,7 @@ static int dss_bind(struct device *dev)
{
struct dss_device *dss = dev_get_drvdata(dev);
struct platform_device *drm_pdev;
+ struct dss_pdata pdata;
int r;
r = component_bind_all(dev, NULL);
@@ -1316,9 +1317,9 @@ static int dss_bind(struct device *dev)
pm_set_vt_switch(0);
- omapdss_set_dss(dss);
-
- drm_pdev = platform_device_register_simple("omapdrm", 0, NULL, 0);
+ pdata.dss = dss;
+ drm_pdev = platform_device_register_data(NULL, "omapdrm", 0,
+ &pdata, sizeof(pdata));
if (IS_ERR(drm_pdev)) {
component_unbind_all(dev, NULL);
return PTR_ERR(drm_pdev);
@@ -1335,8 +1336,6 @@ static void dss_unbind(struct device *dev)
platform_device_unregister(dss->drm_pdev);
- omapdss_set_dss(NULL);
-
component_unbind_all(dev, NULL);
}
@@ -1569,15 +1568,7 @@ static int dss_remove(struct platform_device *pdev)
static void dss_shutdown(struct platform_device *pdev)
{
- struct omap_dss_device *dssdev = NULL;
-
DSSDBG("shutdown\n");
-
- for_each_dss_output(dssdev) {
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
- dssdev->ops && dssdev->ops->disable)
- dssdev->ops->disable(dssdev);
- }
}
static int dss_runtime_suspend(struct device *dev)
@@ -1650,21 +1641,14 @@ static struct platform_driver * const omap_dss_drivers[] = {
#endif
};
-static int __init omap_dss_init(void)
+int __init omap_dss_init(void)
{
return platform_register_drivers(omap_dss_drivers,
ARRAY_SIZE(omap_dss_drivers));
}
-static void __exit omap_dss_exit(void)
+void omap_dss_exit(void)
{
platform_unregister_drivers(omap_dss_drivers,
ARRAY_SIZE(omap_dss_drivers));
}
-
-module_init(omap_dss_init);
-module_exit(omap_dss_exit);
-
-MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
-MODULE_DESCRIPTION("OMAP2/3/4/5 Display Subsystem");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 2b404bcb41dd..a547527bb2f3 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -257,8 +257,6 @@ struct dss_device {
struct dss_pll *video2_pll;
struct dispc_device *dispc;
- const struct dispc_ops *dispc_ops;
- const struct dss_mgr_ops *mgr_ops;
struct omap_drm_private *mgr_ops_priv;
};
@@ -393,6 +391,76 @@ void dispc_dump_clocks(struct dispc_device *dispc, struct seq_file *s);
int dispc_runtime_get(struct dispc_device *dispc);
void dispc_runtime_put(struct dispc_device *dispc);
+int dispc_get_num_ovls(struct dispc_device *dispc);
+int dispc_get_num_mgrs(struct dispc_device *dispc);
+
+const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
+ enum omap_plane_id plane);
+
+u32 dispc_read_irqstatus(struct dispc_device *dispc);
+void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask);
+void dispc_write_irqenable(struct dispc_device *dispc, u32 mask);
+
+int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
+ void *dev_id);
+void dispc_free_irq(struct dispc_device *dispc, void *dev_id);
+
+u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
+ enum omap_channel channel);
+u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
+ enum omap_channel channel);
+u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
+ enum omap_channel channel);
+u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc);
+
+u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc);
+
+void dispc_mgr_enable(struct dispc_device *dispc,
+ enum omap_channel channel, bool enable);
+
+bool dispc_mgr_go_busy(struct dispc_device *dispc,
+ enum omap_channel channel);
+
+void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel);
+
+void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config);
+void dispc_mgr_set_timings(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm);
+void dispc_mgr_setup(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct omap_overlay_manager_info *info);
+
+int dispc_mgr_check_timings(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm);
+
+u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
+ enum omap_channel channel);
+void dispc_mgr_set_gamma(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct drm_color_lut *lut,
+ unsigned int length);
+
+int dispc_ovl_setup(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ const struct omap_overlay_info *oi,
+ const struct videomode *vm, bool mem_to_mem,
+ enum omap_channel channel);
+
+int dispc_ovl_enable(struct dispc_device *dispc,
+ enum omap_plane_id plane, bool enable);
+
+bool dispc_has_writeback(struct dispc_device *dispc);
+int dispc_wb_setup(struct dispc_device *dispc,
+ const struct omap_dss_writeback_info *wi,
+ bool mem_to_mem, const struct videomode *vm,
+ enum dss_writeback_channel channel_in);
+bool dispc_wb_go_busy(struct dispc_device *dispc);
+void dispc_wb_go(struct dispc_device *dispc);
+
void dispc_enable_sidle(struct dispc_device *dispc);
void dispc_disable_sidle(struct dispc_device *dispc);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 8de41e74e8f8..35b750cebaeb 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -707,7 +707,6 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi)
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->owner = THIS_MODULE;
out->of_port = 0;
r = omapdss_device_init_output(out, &hdmi->bridge);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 54e5cb5aa52d..65085d886da5 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -681,7 +681,6 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi)
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->owner = THIS_MODULE;
out->of_port = 0;
r = omapdss_device_init_output(out, &hdmi->bridge);
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
deleted file mode 100644
index f21b5df31213..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ /dev/null
@@ -1,229 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
- * Author: Tomi Valkeinen <[email protected]>
- */
-
-/*
- * As omapdss panel drivers are omapdss specific, but we want to define the
- * DT-data in generic manner, we convert the compatible strings of the panel and
- * encoder nodes from "panel-foo" to "omapdss,panel-foo". This way we can have
- * both correct DT data and omapdss specific drivers.
- *
- * When we get generic panel drivers to the kernel, this file will be removed.
- */
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-
-static struct list_head dss_conv_list __initdata;
-
-static const char prefix[] __initconst = "omapdss,";
-
-struct dss_conv_node {
- struct list_head list;
- struct device_node *node;
- bool root;
-};
-
-static int __init omapdss_count_strings(const struct property *prop)
-{
- const char *p = prop->value;
- int l = 0, total = 0;
- int i;
-
- for (i = 0; total < prop->length; total += l, p += l, i++)
- l = strlen(p) + 1;
-
- return i;
-}
-
-static void __init omapdss_update_prop(struct device_node *node, char *compat,
- int len)
-{
- struct property *prop;
-
- prop = kzalloc(sizeof(*prop), GFP_KERNEL);
- if (!prop)
- return;
-
- prop->name = "compatible";
- prop->value = compat;
- prop->length = len;
-
- of_update_property(node, prop);
-}
-
-static void __init omapdss_prefix_strcpy(char *dst, int dst_len,
- const char *src, int src_len)
-{
- size_t total = 0;
-
- while (total < src_len) {
- size_t l = strlen(src) + 1;
-
- strcpy(dst, prefix);
- dst += strlen(prefix);
-
- strcpy(dst, src);
- dst += l;
-
- src += l;
- total += l;
- }
-}
-
-/* prepend compatible property strings with "omapdss," */
-static void __init omapdss_omapify_node(struct device_node *node)
-{
- struct property *prop;
- char *new_compat;
- int num_strs;
- int new_len;
-
- prop = of_find_property(node, "compatible", NULL);
-
- if (!prop || !prop->value)
- return;
-
- if (strnlen(prop->value, prop->length) >= prop->length)
- return;
-
- /* is it already prefixed? */
- if (strncmp(prefix, prop->value, strlen(prefix)) == 0)
- return;
-
- num_strs = omapdss_count_strings(prop);
-
- new_len = prop->length + strlen(prefix) * num_strs;
- new_compat = kmalloc(new_len, GFP_KERNEL);
-
- omapdss_prefix_strcpy(new_compat, new_len, prop->value, prop->length);
-
- omapdss_update_prop(node, new_compat, new_len);
-}
-
-static void __init omapdss_add_to_list(struct device_node *node, bool root)
-{
- struct dss_conv_node *n = kmalloc(sizeof(*n), GFP_KERNEL);
- if (n) {
- n->node = node;
- n->root = root;
- list_add(&n->list, &dss_conv_list);
- }
-}
-
-static bool __init omapdss_list_contains(const struct device_node *node)
-{
- struct dss_conv_node *n;
-
- list_for_each_entry(n, &dss_conv_list, list) {
- if (n->node == node)
- return true;
- }
-
- return false;
-}
-
-static void __init omapdss_walk_device(struct device_node *node, bool root)
-{
- struct device_node *n;
-
- omapdss_add_to_list(node, root);
-
- /*
- * of_graph_get_remote_port_parent() prints an error if there is no
- * port/ports node. To avoid that, check first that there's the node.
- */
- n = of_get_child_by_name(node, "ports");
- if (!n)
- n = of_get_child_by_name(node, "port");
- if (!n)
- return;
-
- of_node_put(n);
-
- n = NULL;
- while ((n = of_graph_get_next_endpoint(node, n)) != NULL) {
- struct device_node *pn;
-
- pn = of_graph_get_remote_port_parent(n);
-
- if (!pn)
- continue;
-
- if (!of_device_is_available(pn) || omapdss_list_contains(pn)) {
- of_node_put(pn);
- continue;
- }
-
- omapdss_walk_device(pn, false);
- }
-}
-
-static const struct of_device_id omapdss_of_match[] __initconst = {
- { .compatible = "ti,omap2-dss", },
- { .compatible = "ti,omap3-dss", },
- { .compatible = "ti,omap4-dss", },
- { .compatible = "ti,omap5-dss", },
- { .compatible = "ti,dra7-dss", },
- {},
-};
-
-static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
- { .compatible = "panel-dsi-cm" },
- {},
-};
-
-static void __init omapdss_find_children(struct device_node *np)
-{
- struct device_node *child;
-
- for_each_available_child_of_node(np, child) {
- if (!of_find_property(child, "compatible", NULL))
- continue;
-
- omapdss_walk_device(child, true);
-
- if (of_device_is_compatible(child, "ti,sysc"))
- omapdss_find_children(child);
- }
-}
-
-static int __init omapdss_boot_init(void)
-{
- struct device_node *dss;
-
- INIT_LIST_HEAD(&dss_conv_list);
-
- dss = of_find_matching_node(NULL, omapdss_of_match);
-
- if (dss == NULL || !of_device_is_available(dss))
- goto put_node;
-
- omapdss_walk_device(dss, true);
- omapdss_find_children(dss);
-
- while (!list_empty(&dss_conv_list)) {
- struct dss_conv_node *n;
-
- n = list_first_entry(&dss_conv_list, struct dss_conv_node,
- list);
-
- if (of_match_node(omapdss_of_fixups_whitelist, n->node))
- omapdss_omapify_node(n->node);
-
- list_del(&n->list);
- of_node_put(n->node);
- kfree(n);
- }
-
-put_node:
- of_node_put(dss);
- return 0;
-}
-
-subsys_initcall(omapdss_boot_init);
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index a48a9a254e33..a40abeafd2e9 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -7,13 +7,14 @@
#ifndef __OMAP_DRM_DSS_H
#define __OMAP_DRM_DSS_H
-#include <linux/list.h>
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mode.h>
#include <linux/device.h>
#include <linux/interrupt.h>
-#include <video/videomode.h>
+#include <linux/list.h>
#include <linux/platform_data/omapdss.h>
-#include <uapi/drm/drm_mode.h>
-#include <drm/drm_crtc.h>
+#include <video/videomode.h>
#define DISPC_IRQ_FRAMEDONE (1 << 0)
#define DISPC_IRQ_VSYNC (1 << 1)
@@ -116,28 +117,6 @@ enum omap_dss_venc_type {
OMAP_DSS_VENC_TYPE_SVIDEO,
};
-enum omap_dss_dsi_pixel_format {
- OMAP_DSS_DSI_FMT_RGB888,
- OMAP_DSS_DSI_FMT_RGB666,
- OMAP_DSS_DSI_FMT_RGB666_PACKED,
- OMAP_DSS_DSI_FMT_RGB565,
-};
-
-enum omap_dss_dsi_mode {
- OMAP_DSS_DSI_CMD_MODE = 0,
- OMAP_DSS_DSI_VIDEO_MODE,
-};
-
-enum omap_display_caps {
- OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE = 1 << 0,
- OMAP_DSS_DISPLAY_CAP_TEAR_ELIM = 1 << 1,
-};
-
-enum omap_dss_display_state {
- OMAP_DSS_DISPLAY_DISABLED = 0,
- OMAP_DSS_DISPLAY_ACTIVE,
-};
-
enum omap_dss_rotation_type {
OMAP_DSS_ROT_NONE = 0,
OMAP_DSS_ROT_TILER = 1 << 0,
@@ -162,64 +141,6 @@ enum omap_dss_output_id {
OMAP_DSS_OUTPUT_HDMI = 1 << 6,
};
-/* DSI */
-
-enum omap_dss_dsi_trans_mode {
- /* Sync Pulses: both sync start and end packets sent */
- OMAP_DSS_DSI_PULSE_MODE,
- /* Sync Events: only sync start packets sent */
- OMAP_DSS_DSI_EVENT_MODE,
- /* Burst: only sync start packets sent, pixels are time compressed */
- OMAP_DSS_DSI_BURST_MODE,
-};
-
-struct omap_dss_dsi_videomode_timings {
- unsigned long hsclk;
-
- unsigned int ndl;
- unsigned int bitspp;
-
- /* pixels */
- u16 hact;
- /* lines */
- u16 vact;
-
- /* DSI video mode blanking data */
- /* Unit: byte clock cycles */
- u16 hss;
- u16 hsa;
- u16 hse;
- u16 hfp;
- u16 hbp;
- /* Unit: line clocks */
- u16 vsa;
- u16 vfp;
- u16 vbp;
-
- /* DSI blanking modes */
- int blanking_mode;
- int hsa_blanking_mode;
- int hbp_blanking_mode;
- int hfp_blanking_mode;
-
- enum omap_dss_dsi_trans_mode trans_mode;
-
- bool ddr_clk_always_on;
- int window_sync;
-};
-
-struct omap_dss_dsi_config {
- enum omap_dss_dsi_mode mode;
- enum omap_dss_dsi_pixel_format pixel_format;
- const struct videomode *vm;
-
- unsigned long hs_clk_min, hs_clk_max;
- unsigned long lp_clk_min, lp_clk_max;
-
- bool ddr_clk_always_on;
- enum omap_dss_dsi_trans_mode trans_mode;
-};
-
struct omap_dss_cpr_coefs {
s16 rr, rg, rb;
s16 gr, gg, gb;
@@ -243,6 +164,9 @@ struct omap_overlay_info {
u8 global_alpha;
u8 pre_mult_alpha;
u8 zorder;
+
+ enum drm_color_encoding color_encoding;
+ enum drm_color_range color_range;
};
struct omap_overlay_manager_info {
@@ -258,21 +182,6 @@ struct omap_overlay_manager_info {
struct omap_dss_cpr_coefs cpr_coefs;
};
-/* 22 pins means 1 clk lane and 10 data lanes */
-#define OMAP_DSS_MAX_DSI_PINS 22
-
-struct omap_dsi_pin_config {
- int num_pins;
- /*
- * pin numbers in the following order:
- * clk+, clk-
- * data1+, data1-
- * data2+, data2-
- * ...
- */
- int pins[OMAP_DSS_MAX_DSI_PINS];
-};
-
struct omap_dss_writeback_info {
u32 paddr;
u32 p_uv_addr;
@@ -286,89 +195,14 @@ struct omap_dss_writeback_info {
};
struct omapdss_dsi_ops {
- void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
- bool enter_ulps);
-
- /* bus configuration */
- int (*set_config)(struct omap_dss_device *dssdev,
- const struct omap_dss_dsi_config *cfg);
- int (*configure_pins)(struct omap_dss_device *dssdev,
- const struct omap_dsi_pin_config *pin_cfg);
-
- void (*enable_hs)(struct omap_dss_device *dssdev, int channel,
- bool enable);
- int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
-
- int (*update)(struct omap_dss_device *dssdev, int channel,
- void (*callback)(int, void *), void *data);
-
- void (*bus_lock)(struct omap_dss_device *dssdev);
- void (*bus_unlock)(struct omap_dss_device *dssdev);
-
- int (*enable_video_output)(struct omap_dss_device *dssdev, int channel);
- void (*disable_video_output)(struct omap_dss_device *dssdev,
- int channel);
-
- int (*request_vc)(struct omap_dss_device *dssdev, int *channel);
- int (*set_vc_id)(struct omap_dss_device *dssdev, int channel,
- int vc_id);
- void (*release_vc)(struct omap_dss_device *dssdev, int channel);
-
- /* data transfer */
- int (*dcs_write)(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len);
- int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len);
- int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
- u8 *data, int len);
-
- int (*gen_write)(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len);
- int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len);
- int (*gen_read)(struct omap_dss_device *dssdev, int channel,
- u8 *reqdata, int reqlen,
- u8 *data, int len);
-
- int (*bta_sync)(struct omap_dss_device *dssdev, int channel);
-
- int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev,
- int channel, u16 plen);
-};
-
-struct omap_dss_device_ops {
- int (*connect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-
- void (*enable)(struct omap_dss_device *dssdev);
- void (*disable)(struct omap_dss_device *dssdev);
-
- int (*check_timings)(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode);
-
- int (*get_modes)(struct omap_dss_device *dssdev,
- struct drm_connector *connector);
-
- const struct omapdss_dsi_ops dsi;
-};
-
-/**
- * enum omap_dss_device_ops_flag - Indicates which device ops are supported
- * @OMAP_DSS_DEVICE_OP_MODES: The device supports reading modes
- */
-enum omap_dss_device_ops_flag {
- OMAP_DSS_DEVICE_OP_MODES = BIT(3),
+ int (*update)(struct omap_dss_device *dssdev);
+ bool (*is_video_mode)(struct omap_dss_device *dssdev);
};
struct omap_dss_device {
struct device *dev;
- struct module *owner;
-
struct dss_device *dss;
- struct omap_dss_device *next;
struct drm_bridge *bridge;
struct drm_bridge *next_bridge;
struct drm_panel *panel;
@@ -382,23 +216,11 @@ struct omap_dss_device {
*/
enum omap_display_type type;
- /*
- * True if the device is a display (panel or connector) at the end of
- * the pipeline, false otherwise.
- */
- bool display;
-
const char *name;
- const struct omap_dss_driver *driver;
- const struct omap_dss_device_ops *ops;
- unsigned long ops_flags;
+ const struct omapdss_dsi_ops *dsi_ops;
u32 bus_flags;
- enum omap_display_caps caps;
-
- enum omap_dss_display_state state;
-
/* OMAP DSS output specific fields */
/* DISPC channel for this output */
@@ -411,30 +233,10 @@ struct omap_dss_device {
unsigned int of_port;
};
-struct omap_dss_driver {
- int (*update)(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h);
- int (*sync)(struct omap_dss_device *dssdev);
-
- int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
- int (*get_te)(struct omap_dss_device *dssdev);
-
- int (*memory_read)(struct omap_dss_device *dssdev,
- void *buf, size_t size,
- u16 x, u16 y, u16 w, u16 h);
+struct dss_pdata {
+ struct dss_device *dss;
};
-struct dss_device *omapdss_get_dss(void);
-void omapdss_set_dss(struct dss_device *dss);
-static inline bool omapdss_is_initialized(void)
-{
- return !!omapdss_get_dss();
-}
-
-void omapdss_display_init(struct omap_dss_device *dssdev);
-int omapdss_display_get_modes(struct drm_connector *connector,
- const struct videomode *vm);
-
void omapdss_device_register(struct omap_dss_device *dssdev);
void omapdss_device_unregister(struct omap_dss_device *dssdev);
struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev);
@@ -445,8 +247,6 @@ int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *dst);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst);
-void omapdss_device_enable(struct omap_dss_device *dssdev);
-void omapdss_device_disable(struct omap_dss_device *dssdev);
int omap_dss_get_num_overlay_managers(void);
@@ -466,11 +266,6 @@ int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
int omapdss_compat_init(void);
void omapdss_compat_uninit(void);
-static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
-{
- return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
-}
-
enum dss_writeback_channel {
DSS_WB_LCD1_MGR = 0,
DSS_WB_LCD2_MGR = 1,
@@ -482,31 +277,23 @@ enum dss_writeback_channel {
DSS_WB_LCD3_MGR = 7,
};
-struct dss_mgr_ops {
- void (*start_update)(struct omap_drm_private *priv,
- enum omap_channel channel);
- int (*enable)(struct omap_drm_private *priv,
- enum omap_channel channel);
- void (*disable)(struct omap_drm_private *priv,
- enum omap_channel channel);
- void (*set_timings)(struct omap_drm_private *priv,
- enum omap_channel channel,
- const struct videomode *vm);
- void (*set_lcd_config)(struct omap_drm_private *priv,
- enum omap_channel channel,
- const struct dss_lcd_mgr_config *config);
- int (*register_framedone_handler)(struct omap_drm_private *priv,
- enum omap_channel channel,
- void (*handler)(void *), void *data);
- void (*unregister_framedone_handler)(struct omap_drm_private *priv,
- enum omap_channel channel,
- void (*handler)(void *), void *data);
-};
-
-int dss_install_mgr_ops(struct dss_device *dss,
- const struct dss_mgr_ops *mgr_ops,
- struct omap_drm_private *priv);
-void dss_uninstall_mgr_ops(struct dss_device *dss);
+void omap_crtc_dss_start_update(struct omap_drm_private *priv,
+ enum omap_channel channel);
+void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable);
+int omap_crtc_dss_enable(struct omap_drm_private *priv, enum omap_channel channel);
+void omap_crtc_dss_disable(struct omap_drm_private *priv, enum omap_channel channel);
+void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
+ enum omap_channel channel,
+ const struct videomode *vm);
+void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
+ enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config);
+int omap_crtc_dss_register_framedone(
+ struct omap_drm_private *priv, enum omap_channel channel,
+ void (*handler)(void *), void *data);
+void omap_crtc_dss_unregister_framedone(
+ struct omap_drm_private *priv, enum omap_channel channel,
+ void (*handler)(void *), void *data);
void dss_mgr_set_timings(struct omap_dss_device *dssdev,
const struct videomode *vm);
@@ -520,80 +307,12 @@ int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev,
void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data);
-/* dispc ops */
-
-struct dispc_ops {
- u32 (*read_irqstatus)(struct dispc_device *dispc);
- void (*clear_irqstatus)(struct dispc_device *dispc, u32 mask);
- void (*write_irqenable)(struct dispc_device *dispc, u32 mask);
-
- int (*request_irq)(struct dispc_device *dispc, irq_handler_t handler,
- void *dev_id);
- void (*free_irq)(struct dispc_device *dispc, void *dev_id);
-
- int (*runtime_get)(struct dispc_device *dispc);
- void (*runtime_put)(struct dispc_device *dispc);
-
- int (*get_num_ovls)(struct dispc_device *dispc);
- int (*get_num_mgrs)(struct dispc_device *dispc);
-
- u32 (*get_memory_bandwidth_limit)(struct dispc_device *dispc);
-
- void (*mgr_enable)(struct dispc_device *dispc,
- enum omap_channel channel, bool enable);
- bool (*mgr_is_enabled)(struct dispc_device *dispc,
- enum omap_channel channel);
- u32 (*mgr_get_vsync_irq)(struct dispc_device *dispc,
- enum omap_channel channel);
- u32 (*mgr_get_framedone_irq)(struct dispc_device *dispc,
- enum omap_channel channel);
- u32 (*mgr_get_sync_lost_irq)(struct dispc_device *dispc,
- enum omap_channel channel);
- bool (*mgr_go_busy)(struct dispc_device *dispc,
- enum omap_channel channel);
- void (*mgr_go)(struct dispc_device *dispc, enum omap_channel channel);
- void (*mgr_set_lcd_config)(struct dispc_device *dispc,
- enum omap_channel channel,
- const struct dss_lcd_mgr_config *config);
- int (*mgr_check_timings)(struct dispc_device *dispc,
- enum omap_channel channel,
- const struct videomode *vm);
- void (*mgr_set_timings)(struct dispc_device *dispc,
- enum omap_channel channel,
- const struct videomode *vm);
- void (*mgr_setup)(struct dispc_device *dispc, enum omap_channel channel,
- const struct omap_overlay_manager_info *info);
- u32 (*mgr_gamma_size)(struct dispc_device *dispc,
- enum omap_channel channel);
- void (*mgr_set_gamma)(struct dispc_device *dispc,
- enum omap_channel channel,
- const struct drm_color_lut *lut,
- unsigned int length);
-
- int (*ovl_enable)(struct dispc_device *dispc, enum omap_plane_id plane,
- bool enable);
- int (*ovl_setup)(struct dispc_device *dispc, enum omap_plane_id plane,
- const struct omap_overlay_info *oi,
- const struct videomode *vm, bool mem_to_mem,
- enum omap_channel channel);
-
- const u32 *(*ovl_get_color_modes)(struct dispc_device *dispc,
- enum omap_plane_id plane);
-
- u32 (*wb_get_framedone_irq)(struct dispc_device *dispc);
- int (*wb_setup)(struct dispc_device *dispc,
- const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct videomode *vm,
- enum dss_writeback_channel channel_in);
- bool (*has_writeback)(struct dispc_device *dispc);
- bool (*wb_go_busy)(struct dispc_device *dispc);
- void (*wb_go)(struct dispc_device *dispc);
-};
-
struct dispc_device *dispc_get_dispc(struct dss_device *dss);
-const struct dispc_ops *dispc_get_ops(struct dss_device *dss);
bool omapdss_stack_is_ready(void);
void omapdss_gather_components(struct device *dev);
+int omap_dss_init(void);
+void omap_dss_exit(void);
+
#endif /* __OMAP_DRM_DSS_H */
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 5affdf078134..7378e855c278 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -30,7 +30,6 @@ int omapdss_device_init_output(struct omap_dss_device *out,
return 0;
}
- out->next = omapdss_find_device_by_node(remote_node);
out->bridge = of_drm_find_bridge(remote_node);
out->panel = of_drm_find_panel(remote_node);
if (IS_ERR(out->panel))
@@ -38,12 +37,6 @@ int omapdss_device_init_output(struct omap_dss_device *out,
of_node_put(remote_node);
- if (out->next && out->type != out->next->type) {
- dev_err(out->dev, "output type and display type don't match\n");
- ret = -EINVAL;
- goto error;
- }
-
if (out->panel) {
struct drm_bridge *bridge;
@@ -69,7 +62,7 @@ int omapdss_device_init_output(struct omap_dss_device *out,
out->bridge = local_bridge;
}
- if (!out->next && !out->bridge) {
+ if (!out->bridge) {
ret = -EPROBE_DEFER;
goto error;
}
@@ -78,98 +71,64 @@ int omapdss_device_init_output(struct omap_dss_device *out,
error:
omapdss_device_cleanup_output(out);
- out->next = NULL;
return ret;
}
-EXPORT_SYMBOL(omapdss_device_init_output);
void omapdss_device_cleanup_output(struct omap_dss_device *out)
{
if (out->bridge && out->panel)
drm_panel_bridge_remove(out->next_bridge ?
out->next_bridge : out->bridge);
-
- if (out->next)
- omapdss_device_put(out->next);
-}
-EXPORT_SYMBOL(omapdss_device_cleanup_output);
-
-int dss_install_mgr_ops(struct dss_device *dss,
- const struct dss_mgr_ops *mgr_ops,
- struct omap_drm_private *priv)
-{
- if (dss->mgr_ops)
- return -EBUSY;
-
- dss->mgr_ops = mgr_ops;
- dss->mgr_ops_priv = priv;
-
- return 0;
-}
-EXPORT_SYMBOL(dss_install_mgr_ops);
-
-void dss_uninstall_mgr_ops(struct dss_device *dss)
-{
- dss->mgr_ops = NULL;
- dss->mgr_ops_priv = NULL;
}
-EXPORT_SYMBOL(dss_uninstall_mgr_ops);
void dss_mgr_set_timings(struct omap_dss_device *dssdev,
const struct videomode *vm)
{
- dssdev->dss->mgr_ops->set_timings(dssdev->dss->mgr_ops_priv,
+ omap_crtc_dss_set_timings(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel, vm);
}
-EXPORT_SYMBOL(dss_mgr_set_timings);
void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev,
const struct dss_lcd_mgr_config *config)
{
- dssdev->dss->mgr_ops->set_lcd_config(dssdev->dss->mgr_ops_priv,
+ omap_crtc_dss_set_lcd_config(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel, config);
}
-EXPORT_SYMBOL(dss_mgr_set_lcd_config);
int dss_mgr_enable(struct omap_dss_device *dssdev)
{
- return dssdev->dss->mgr_ops->enable(dssdev->dss->mgr_ops_priv,
+ return omap_crtc_dss_enable(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel);
}
-EXPORT_SYMBOL(dss_mgr_enable);
void dss_mgr_disable(struct omap_dss_device *dssdev)
{
- dssdev->dss->mgr_ops->disable(dssdev->dss->mgr_ops_priv,
+ omap_crtc_dss_disable(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel);
}
-EXPORT_SYMBOL(dss_mgr_disable);
void dss_mgr_start_update(struct omap_dss_device *dssdev)
{
- dssdev->dss->mgr_ops->start_update(dssdev->dss->mgr_ops_priv,
+ omap_crtc_dss_start_update(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel);
}
-EXPORT_SYMBOL(dss_mgr_start_update);
int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
struct dss_device *dss = dssdev->dss;
- return dss->mgr_ops->register_framedone_handler(dss->mgr_ops_priv,
+ return omap_crtc_dss_register_framedone(dss->mgr_ops_priv,
dssdev->dispc_channel,
handler, data);
}
-EXPORT_SYMBOL(dss_mgr_register_framedone_handler);
void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
struct dss_device *dss = dssdev->dss;
- dss->mgr_ops->unregister_framedone_handler(dss->mgr_ops_priv,
+ omap_crtc_dss_unregister_framedone(dss->mgr_ops_priv,
dssdev->dispc_channel,
handler, data);
}
-EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler);
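The output.c hunks above replace the dss_mgr_ops function-pointer table with direct calls into the omapdrm CRTC helpers; only the mgr_ops_priv pointer survives in struct dss_device. A condensed before/after sketch of one call path (simplified from the hunks, not a complete implementation):

        /* Before: one indirection through the installed ops table. */
        dssdev->dss->mgr_ops->enable(dssdev->dss->mgr_ops_priv,
                                     dssdev->dispc_channel);

        /* After: the shim calls the CRTC helper directly; mgr_ops_priv
         * still carries the omap_drm_private pointer set up at init. */
        omap_crtc_dss_enable(dssdev->dss->mgr_ops_priv,
                             dssdev->dispc_channel);

The EXPORT_SYMBOL annotations drop out for the same reason: the helpers are now resolved within one module rather than across the old omapdss/omapdrm module boundary.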
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index 241a338ace29..4c8246a3ded9 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -222,6 +222,9 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
n_stop = min((unsigned)(clkin / fint_hw_min), hw->n_max);
n_inc = 1;
+ if (n_start > n_stop)
+ return false;
+
if (hw->errata_i886) {
swap(n_start, n_stop);
n_inc = -1;
@@ -239,6 +242,9 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
hw->m_max);
m_inc = 1;
+ if (m_start > m_stop)
+ continue;
+
if (hw->errata_i886) {
swap(m_start, m_stop);
m_inc = -1;
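The two pll.c guards reject an empty divider range before the errata_i886 path swaps the bounds and walks them with a negative increment. A minimal worked example for the n range, using illustrative clock values rather than the real OMAP limits:

        /* Sketch only: the numbers below are made up for illustration. */
        unsigned long clkin    = 500000;        /* 0.5 MHz reference   */
        unsigned long fint_min = 750000;        /* lowest allowed fint */
        unsigned long fint_max = 2500000;       /* highest allowed fint */

        unsigned long n_start = max(DIV_ROUND_UP(clkin, fint_max), 1ul); /* 1 */
        unsigned long n_stop  = min(clkin / fint_min, 255ul);            /* 0 */

        /* n_start > n_stop: no predivider keeps fint within range, so
         * dss_pll_calc_a() can return false immediately instead of
         * letting the swapped, decrementing loop run over an interval
         * that was never valid. */

The m-range guard inside the loop follows the same reasoning, using continue so the remaining n values are still tried.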
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 282e4c837cd9..91eaae3b9481 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -312,7 +312,6 @@ static int sdi_init_output(struct sdi_device *sdi)
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
/* We have SDI only on OMAP3, where it's on port 1 */
out->of_port = 1;
- out->owner = THIS_MODULE;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE /* 15.5.9.1.2 */
| DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 94cf50d837b0..e522c17955d0 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -733,9 +733,7 @@ static int venc_init_output(struct venc_device *venc)
out->type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->owner = THIS_MODULE;
out->of_port = 0;
- out->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
r = omapdss_device_init_output(out, &venc->bridge);
if (r < 0) {
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
deleted file mode 100644
index 47719b92e22b..000000000000
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ /dev/null
@@ -1,157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
- * Author: Rob Clark <[email protected]>
- */
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_probe_helper.h>
-
-#include "omap_drv.h"
-
-/*
- * connector funcs
- */
-
-#define to_omap_connector(x) container_of(x, struct omap_connector, base)
-
-struct omap_connector {
- struct drm_connector base;
- struct omap_dss_device *output;
-};
-
-static enum drm_connector_status omap_connector_detect(
- struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
-static void omap_connector_destroy(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
-
- DBG("%s", connector->name);
-
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-
- omapdss_device_put(omap_connector->output);
-
- kfree(omap_connector);
-}
-
-static int omap_connector_get_modes(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = NULL;
- struct omap_dss_device *d;
-
- DBG("%s", connector->name);
-
- /*
- * If the display pipeline reports modes (e.g. with a fixed resolution
- * panel or an analog TV output), query it.
- */
- for (d = omap_connector->output; d; d = d->next) {
- if (d->ops_flags & OMAP_DSS_DEVICE_OP_MODES)
- dssdev = d;
- }
-
- if (dssdev)
- return dssdev->ops->get_modes(dssdev, connector);
-
- /* We can't retrieve modes. The KMS core will add the default modes. */
- return 0;
-}
-
-enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- int ret;
-
- drm_mode_copy(adjusted_mode, mode);
-
- for (; dssdev; dssdev = dssdev->next) {
- if (!dssdev->ops || !dssdev->ops->check_timings)
- continue;
-
- ret = dssdev->ops->check_timings(dssdev, adjusted_mode);
- if (ret)
- return MODE_BAD;
- }
-
- return MODE_OK;
-}
-
-static enum drm_mode_status omap_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct drm_display_mode new_mode = {};
- enum drm_mode_status status;
-
- status = omap_connector_mode_fixup(omap_connector->output, mode,
- &new_mode);
- if (status != MODE_OK)
- goto done;
-
- /* Check if vrefresh is still valid. */
- if (drm_mode_vrefresh(mode) != drm_mode_vrefresh(&new_mode))
- status = MODE_NOCLOCK;
-
-done:
- DBG("connector: mode %s: " DRM_MODE_FMT,
- (status == MODE_OK) ? "valid" : "invalid",
- DRM_MODE_ARG(mode));
-
- return status;
-}
-
-static const struct drm_connector_funcs omap_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .detect = omap_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = omap_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
- .get_modes = omap_connector_get_modes,
- .mode_valid = omap_connector_mode_valid,
-};
-
-/* initialize connector */
-struct drm_connector *omap_connector_init(struct drm_device *dev,
- struct omap_dss_device *output,
- struct drm_encoder *encoder)
-{
- struct drm_connector *connector = NULL;
- struct omap_connector *omap_connector;
-
- DBG("%s", output->name);
-
- omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL);
- if (!omap_connector)
- goto fail;
-
- omap_connector->output = omapdss_device_get(output);
-
- connector = &omap_connector->base;
- connector->interlace_allowed = 1;
- connector->doublescan_allowed = 0;
-
- drm_connector_init(dev, connector, &omap_connector_funcs,
- DRM_MODE_CONNECTOR_DSI);
- drm_connector_helper_add(connector, &omap_connector_helper_funcs);
-
- return connector;
-
-fail:
- if (connector)
- omap_connector_destroy(connector);
-
- return NULL;
-}
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
deleted file mode 100644
index 0ecd4f1655b7..000000000000
--- a/drivers/gpu/drm/omapdrm/omap_connector.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * omap_connector.h -- OMAP DRM Connector
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Rob Clark <[email protected]>
- */
-
-#ifndef __OMAPDRM_CONNECTOR_H__
-#define __OMAPDRM_CONNECTOR_H__
-
-#include <linux/types.h>
-
-enum drm_mode_status;
-
-struct drm_connector;
-struct drm_device;
-struct drm_encoder;
-struct omap_dss_device;
-
-struct drm_connector *omap_connector_init(struct drm_device *dev,
- struct omap_dss_device *output,
- struct drm_encoder *encoder);
-enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
-#endif /* __OMAPDRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 7d66269ad998..06a719c104f4 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -100,14 +100,14 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc)
* the upstream part of the video pipe.
*/
-static void omap_crtc_dss_start_update(struct omap_drm_private *priv,
+void omap_crtc_dss_start_update(struct omap_drm_private *priv,
enum omap_channel channel)
{
- priv->dispc_ops->mgr_enable(priv->dispc, channel, true);
+ dispc_mgr_enable(priv->dispc, channel, true);
}
/* Called only from the encoder enable/disable and suspend/resume handlers. */
-static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
+void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
{
struct omap_crtc_state *omap_state = to_omap_crtc_state(crtc->state);
struct drm_device *dev = crtc->dev;
@@ -128,7 +128,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
}
if (omap_crtc->pipe->output->type == OMAP_DISPLAY_TYPE_HDMI) {
- priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
+ dispc_mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
return;
}
@@ -141,9 +141,9 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
omap_crtc->ignore_digit_sync_lost = true;
}
- framedone_irq = priv->dispc_ops->mgr_get_framedone_irq(priv->dispc,
+ framedone_irq = dispc_mgr_get_framedone_irq(priv->dispc,
channel);
- vsync_irq = priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel);
+ vsync_irq = dispc_mgr_get_vsync_irq(priv->dispc, channel);
if (enable) {
wait = omap_irq_wait_init(dev, vsync_irq, 1);
@@ -163,7 +163,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
wait = omap_irq_wait_init(dev, vsync_irq, 2);
}
- priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
+ dispc_mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
@@ -180,21 +180,19 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
}
-static int omap_crtc_dss_enable(struct omap_drm_private *priv,
- enum omap_channel channel)
+int omap_crtc_dss_enable(struct omap_drm_private *priv, enum omap_channel channel)
{
struct drm_crtc *crtc = priv->channels[channel]->crtc;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- priv->dispc_ops->mgr_set_timings(priv->dispc, omap_crtc->channel,
+ dispc_mgr_set_timings(priv->dispc, omap_crtc->channel,
&omap_crtc->vm);
omap_crtc_set_enabled(&omap_crtc->base, true);
return 0;
}
-static void omap_crtc_dss_disable(struct omap_drm_private *priv,
- enum omap_channel channel)
+void omap_crtc_dss_disable(struct omap_drm_private *priv, enum omap_channel channel)
{
struct drm_crtc *crtc = priv->channels[channel]->crtc;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -202,7 +200,7 @@ static void omap_crtc_dss_disable(struct omap_drm_private *priv,
omap_crtc_set_enabled(&omap_crtc->base, false);
}
-static void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
+void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
enum omap_channel channel,
const struct videomode *vm)
{
@@ -213,7 +211,7 @@ static void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
omap_crtc->vm = *vm;
}
-static void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
+void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
enum omap_channel channel,
const struct dss_lcd_mgr_config *config)
{
@@ -221,11 +219,11 @@ static void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s", omap_crtc->name);
- priv->dispc_ops->mgr_set_lcd_config(priv->dispc, omap_crtc->channel,
+ dispc_mgr_set_lcd_config(priv->dispc, omap_crtc->channel,
config);
}
-static int omap_crtc_dss_register_framedone(
+int omap_crtc_dss_register_framedone(
struct omap_drm_private *priv, enum omap_channel channel,
void (*handler)(void *), void *data)
{
@@ -244,7 +242,7 @@ static int omap_crtc_dss_register_framedone(
return 0;
}
-static void omap_crtc_dss_unregister_framedone(
+void omap_crtc_dss_unregister_framedone(
struct omap_drm_private *priv, enum omap_channel channel,
void (*handler)(void *), void *data)
{
@@ -261,16 +259,6 @@ static void omap_crtc_dss_unregister_framedone(
omap_crtc->framedone_handler_data = NULL;
}
-static const struct dss_mgr_ops mgr_ops = {
- .start_update = omap_crtc_dss_start_update,
- .enable = omap_crtc_dss_enable,
- .disable = omap_crtc_dss_disable,
- .set_timings = omap_crtc_dss_set_timings,
- .set_lcd_config = omap_crtc_dss_set_lcd_config,
- .register_framedone_handler = omap_crtc_dss_register_framedone,
- .unregister_framedone_handler = omap_crtc_dss_unregister_framedone,
-};
-
/* -----------------------------------------------------------------------------
* Setup, Flush and Page Flip
*/
@@ -300,7 +288,7 @@ void omap_crtc_vblank_irq(struct drm_crtc *crtc)
* If the dispc is busy we're racing the flush operation. Try again on
* the next vblank interrupt.
*/
- if (priv->dispc_ops->mgr_go_busy(priv->dispc, omap_crtc->channel)) {
+ if (dispc_mgr_go_busy(priv->dispc, omap_crtc->channel)) {
spin_unlock(&crtc->dev->event_lock);
return;
}
@@ -362,27 +350,14 @@ static void omap_crtc_manual_display_update(struct work_struct *data)
{
struct omap_crtc *omap_crtc =
container_of(data, struct omap_crtc, update_work.work);
- struct drm_display_mode *mode = &omap_crtc->pipe->crtc->mode;
- struct omap_dss_device *dssdev = omap_crtc->pipe->output->next;
+ struct omap_dss_device *dssdev = omap_crtc->pipe->output;
struct drm_device *dev = omap_crtc->base.dev;
- const struct omap_dss_driver *dssdrv;
int ret;
- if (!dssdev) {
- dev_err_once(dev->dev, "missing display dssdev!");
+ if (!dssdev || !dssdev->dsi_ops || !dssdev->dsi_ops->update)
return;
- }
-
- dssdrv = dssdev->driver;
- if (!dssdrv || !dssdrv->update) {
- dev_err_once(dev->dev, "missing or incorrect dssdrv!");
- return;
- }
-
- if (dssdrv->sync)
- dssdrv->sync(dssdev);
- ret = dssdrv->update(dssdev, 0, 0, mode->hdisplay, mode->vdisplay);
+ ret = dssdev->dsi_ops->update(dssdev);
if (ret < 0) {
spin_lock_irq(&dev->event_lock);
omap_crtc->pending = false;
@@ -391,6 +366,33 @@ static void omap_crtc_manual_display_update(struct work_struct *data)
}
}
+static s16 omap_crtc_s31_32_to_s2_8(s64 coef)
+{
+ u64 sign_bit = 1ULL << 63;
+ u64 cbits = (u64)coef;
+
+ s16 ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x1ff);
+
+ if (cbits & sign_bit)
+ ret = -ret;
+
+ return ret;
+}
+
+static void omap_crtc_cpr_coefs_from_ctm(const struct drm_color_ctm *ctm,
+ struct omap_dss_cpr_coefs *cpr)
+{
+ cpr->rr = omap_crtc_s31_32_to_s2_8(ctm->matrix[0]);
+ cpr->rg = omap_crtc_s31_32_to_s2_8(ctm->matrix[1]);
+ cpr->rb = omap_crtc_s31_32_to_s2_8(ctm->matrix[2]);
+ cpr->gr = omap_crtc_s31_32_to_s2_8(ctm->matrix[3]);
+ cpr->gg = omap_crtc_s31_32_to_s2_8(ctm->matrix[4]);
+ cpr->gb = omap_crtc_s31_32_to_s2_8(ctm->matrix[5]);
+ cpr->br = omap_crtc_s31_32_to_s2_8(ctm->matrix[6]);
+ cpr->bg = omap_crtc_s31_32_to_s2_8(ctm->matrix[7]);
+ cpr->bb = omap_crtc_s31_32_to_s2_8(ctm->matrix[8]);
+}
+
static void omap_crtc_write_crtc_properties(struct drm_crtc *crtc)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
@@ -402,9 +404,17 @@ static void omap_crtc_write_crtc_properties(struct drm_crtc *crtc)
info.default_color = 0x000000;
info.trans_enabled = false;
info.partial_alpha_enabled = false;
- info.cpr_enable = false;
- priv->dispc_ops->mgr_setup(priv->dispc, omap_crtc->channel, &info);
+ if (crtc->state->ctm) {
+ struct drm_color_ctm *ctm = crtc->state->ctm->data;
+
+ info.cpr_enable = true;
+ omap_crtc_cpr_coefs_from_ctm(ctm, &info.cpr_coefs);
+ } else {
+ info.cpr_enable = false;
+ }
+
+ dispc_mgr_setup(priv->dispc, omap_crtc->channel, &info);
}
/* -----------------------------------------------------------------------------
@@ -445,7 +455,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
DBG("%s", omap_crtc->name);
- priv->dispc_ops->runtime_get(priv->dispc);
+ dispc_runtime_get(priv->dispc);
/* manual updated display will not trigger vsync irq */
if (omap_state->manually_updated)
@@ -484,7 +494,7 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
- priv->dispc_ops->runtime_put(priv->dispc);
+ dispc_runtime_put(priv->dispc);
}
static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
@@ -502,9 +512,8 @@ static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
* valid DISPC mode. DSI will calculate and configure the
* proper DISPC mode later.
*/
- if (omap_crtc->pipe->output->next == NULL ||
- omap_crtc->pipe->output->next->type != OMAP_DISPLAY_TYPE_DSI) {
- r = priv->dispc_ops->mgr_check_timings(priv->dispc,
+ if (omap_crtc->pipe->output->type != OMAP_DISPLAY_TYPE_DSI) {
+ r = dispc_mgr_check_timings(priv->dispc,
omap_crtc->channel,
&vm);
if (r)
@@ -555,17 +564,16 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
static bool omap_crtc_is_manually_updated(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- struct omap_dss_device *display = omap_crtc->pipe->output->next;
+ struct omap_dss_device *dssdev = omap_crtc->pipe->output;
- if (!display)
+ if (!dssdev || !dssdev->dsi_ops || !dssdev->dsi_ops->is_video_mode)
return false;
- if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
- DBG("detected manually updated display!");
- return true;
- }
+ if (dssdev->dsi_ops->is_video_mode(dssdev))
+ return false;
- return false;
+ DBG("detected manually updated display!");
+ return true;
}
static int omap_crtc_atomic_check(struct drm_crtc *crtc,
@@ -575,8 +583,8 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc,
crtc);
struct drm_plane_state *pri_state;
- if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) {
- unsigned int length = crtc_state->gamma_lut->length /
+ if (crtc_state->color_mgmt_changed && crtc_state->degamma_lut) {
+ unsigned int length = crtc_state->degamma_lut->length /
sizeof(struct drm_color_lut);
if (length < 2)
@@ -617,13 +625,13 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_color_lut *lut = NULL;
unsigned int length = 0;
- if (crtc->state->gamma_lut) {
+ if (crtc->state->degamma_lut) {
lut = (struct drm_color_lut *)
- crtc->state->gamma_lut->data;
- length = crtc->state->gamma_lut->length /
+ crtc->state->degamma_lut->data;
+ length = crtc->state->degamma_lut->length /
sizeof(*lut);
}
- priv->dispc_ops->mgr_set_gamma(priv->dispc, omap_crtc->channel,
+ dispc_mgr_set_gamma(priv->dispc, omap_crtc->channel,
lut, length);
}
@@ -648,7 +656,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON(ret != 0);
spin_lock_irq(&crtc->dev->event_lock);
- priv->dispc_ops->mgr_go(priv->dispc, omap_crtc->channel);
+ dispc_mgr_go(priv->dispc, omap_crtc->channel);
omap_crtc_arm_event(crtc);
spin_unlock_irq(&crtc->dev->event_lock);
}
@@ -741,7 +749,6 @@ static const struct drm_crtc_funcs omap_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = omap_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.atomic_duplicate_state = omap_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.atomic_set_property = omap_crtc_atomic_set_property,
@@ -771,16 +778,6 @@ static const char *channel_names[] = {
[OMAP_DSS_CHANNEL_LCD3] = "lcd3",
};
-void omap_crtc_pre_init(struct omap_drm_private *priv)
-{
- dss_install_mgr_ops(priv->dss, &mgr_ops, priv);
-}
-
-void omap_crtc_pre_uninit(struct omap_drm_private *priv)
-{
- dss_uninstall_mgr_ops(priv->dss);
-}
-
/* initialize crtc */
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct omap_drm_pipeline *pipe,
@@ -839,10 +836,10 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
* extracted with dispc_mgr_gamma_size(). If it returns 0
* gamma table is not supported.
*/
- if (priv->dispc_ops->mgr_gamma_size(priv->dispc, channel)) {
+ if (dispc_mgr_gamma_size(priv->dispc, channel)) {
unsigned int gamma_lut_size = 256;
- drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size);
+ drm_crtc_enable_color_mgmt(crtc, gamma_lut_size, true, 0);
drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
}
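The new omap_crtc_s31_32_to_s2_8()/omap_crtc_cpr_coefs_from_ctm() pair maps the DRM CTM property (sign-magnitude S31.32 fixed point) onto the DISPC CPR coefficients, which keep 8 fractional bits and clamp at 0x1ff. A few worked values, following the conversion shown in the hunk above:

        /* 32 fractional bits in, 8 fractional bits out: drop 24 bits.   */
        /*  1.0  ->  1ULL << 32          -> >> 24 = 0x100 = 256 ->  1.0  */
        /* -0.5  ->  sign | (1ULL << 31) -> >> 24 = 0x080 = 128 -> -0.5  */
        /*  4.0  ->  4ULL << 32          -> clamped to 0x1ff    -> ~2.0  */

So any CTM coefficient with a magnitude above roughly 1.996 saturates at the hardware maximum, and CPR is only enabled when the CRTC state actually carries a CTM blob. The gamma_lut to degamma_lut switch and the reordered drm_crtc_enable_color_mgmt() arguments go with this: the 256-entry table is now exposed as a degamma LUT alongside the new CTM rather than as a gamma LUT.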
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.h b/drivers/gpu/drm/omapdrm/omap_crtc.h
index 2fd57751ae2b..a8b9cbee86e0 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.h
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.h
@@ -22,8 +22,6 @@ struct videomode;
struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
-void omap_crtc_pre_init(struct omap_drm_private *priv);
-void omap_crtc_pre_uninit(struct omap_drm_private *priv);
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct omap_drm_pipeline *pipe,
struct drm_plane *plane);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 42c2ed752095..28bbad1353ee 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -69,7 +69,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
struct drm_device *dev = old_state->dev;
struct omap_drm_private *priv = dev->dev_private;
- priv->dispc_ops->runtime_get(priv->dispc);
+ dispc_runtime_get(priv->dispc);
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
@@ -113,7 +113,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_cleanup_planes(dev, old_state);
- priv->dispc_ops->runtime_put(priv->dispc);
+ dispc_runtime_put(priv->dispc);
}
static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = {
@@ -192,7 +192,7 @@ static int omap_compare_pipelines(const void *a, const void *b)
static int omap_modeset_init_properties(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc);
+ unsigned int num_planes = dispc_get_num_ovls(priv->dispc);
priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0,
num_planes - 1);
@@ -206,14 +206,7 @@ static int omap_display_id(struct omap_dss_device *output)
{
struct device_node *node = NULL;
- if (output->next) {
- struct omap_dss_device *display = output;
-
- while (display->next)
- display = display->next;
-
- node = display->dev->of_node;
- } else if (output->bridge) {
+ if (output->bridge) {
struct drm_bridge *bridge = output->bridge;
while (drm_bridge_get_next_bridge(bridge))
@@ -228,8 +221,8 @@ static int omap_display_id(struct omap_dss_device *output)
static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- int num_ovls = priv->dispc_ops->get_num_ovls(priv->dispc);
- int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
+ int num_ovls = dispc_get_num_ovls(priv->dispc);
+ int num_mgrs = dispc_get_num_mgrs(priv->dispc);
unsigned int i;
int ret;
u32 plane_crtc_mask;
@@ -332,19 +325,12 @@ static int omap_modeset_init(struct drm_device *dev)
struct drm_encoder *encoder = pipe->encoder;
struct drm_crtc *crtc;
- if (pipe->output->next) {
- pipe->connector = omap_connector_init(dev, pipe->output,
- encoder);
- if (!pipe->connector)
- return -ENOMEM;
- } else {
- pipe->connector = drm_bridge_connector_init(dev, encoder);
- if (IS_ERR(pipe->connector)) {
- dev_err(priv->dev,
- "unable to create bridge connector for %s\n",
- pipe->output->name);
- return PTR_ERR(pipe->connector);
- }
+ pipe->connector = drm_bridge_connector_init(dev, encoder);
+ if (IS_ERR(pipe->connector)) {
+ dev_err(priv->dev,
+ "unable to create bridge connector for %s\n",
+ pipe->output->name);
+ return PTR_ERR(pipe->connector);
}
drm_connector_attach_encoder(pipe->connector, encoder);
@@ -568,6 +554,7 @@ static const struct soc_device_attribute omapdrm_soc_devices[] = {
static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
{
const struct soc_device_attribute *soc;
+ struct dss_pdata *pdata = dev->platform_data;
struct drm_device *ddev;
int ret;
@@ -582,11 +569,10 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
ddev->dev_private = priv;
priv->dev = dev;
- priv->dss = omapdss_get_dss();
+ priv->dss = pdata->dss;
priv->dispc = dispc_get_dispc(priv->dss);
- priv->dispc_ops = dispc_get_ops(priv->dss);
- omap_crtc_pre_init(priv);
+ priv->dss->mgr_ops_priv = priv;
soc = soc_device_match(omapdrm_soc_devices);
priv->omaprev = soc ? (unsigned int)soc->data : 0;
@@ -596,9 +582,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
INIT_LIST_HEAD(&priv->obj_list);
/* Get memory bandwidth limits */
- if (priv->dispc_ops->get_memory_bandwidth_limit)
- priv->max_bandwidth =
- priv->dispc_ops->get_memory_bandwidth_limit(priv->dispc);
+ priv->max_bandwidth = dispc_get_memory_bandwidth_limit(priv->dispc);
omap_gem_init(ddev);
@@ -641,7 +625,6 @@ err_gem_deinit:
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
omap_disconnect_pipelines(ddev);
- omap_crtc_pre_uninit(priv);
drm_dev_put(ddev);
return ret;
}
@@ -667,7 +650,6 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
destroy_workqueue(priv->wq);
omap_disconnect_pipelines(ddev);
- omap_crtc_pre_uninit(priv);
drm_dev_put(ddev);
}
@@ -677,9 +659,6 @@ static int pdev_probe(struct platform_device *pdev)
struct omap_drm_private *priv;
int ret;
- if (omapdss_is_initialized() == false)
- return -EPROBE_DEFER;
-
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "Failed to set the DMA mask\n");
@@ -748,9 +727,21 @@ static struct platform_driver * const drivers[] = {
static int __init omap_drm_init(void)
{
+ int r;
+
DBG("init");
- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+ r = omap_dss_init();
+ if (r)
+ return r;
+
+ r = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+ if (r) {
+ omap_dss_exit();
+ return r;
+ }
+
+ return 0;
}
static void __exit omap_drm_fini(void)
@@ -758,13 +749,15 @@ static void __exit omap_drm_fini(void)
DBG("fini");
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+
+ omap_dss_exit();
}
-/* need late_initcall() so we load after dss_driver's are loaded */
-late_initcall(omap_drm_init);
+module_init(omap_drm_init);
module_exit(omap_drm_fini);
MODULE_AUTHOR("Rob Clark <[email protected]>");
+MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
MODULE_DESCRIPTION("OMAP DRM Display Driver");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL v2");
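With omapdss and omapdrm now built as a single module, omap_drm_init() registers the DSS side first and unwinds it if registering the DRM platform drivers fails; omap_drm_fini() tears down in the reverse order. The -EPROBE_DEFER check on omapdss_is_initialized() and the late_initcall ordering hack are no longer needed because the DSS device hands its dss_device pointer to omapdrm through platform data instead of a global. A sketch of the assumed platform-data shape (the authoritative definition lives in dss/dss.h, which omap_drv.h now includes):

        /* Assumed layout, for illustration only; see dss/dss.h. */
        struct dss_pdata {
                struct dss_device *dss;
        };

        /* omapdrm then picks it up in omapdrm_init():
         *      struct dss_pdata *pdata = dev->platform_data;
         *      priv->dss = pdata->dss;
         */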
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index ae57e7ada876..d6f136984da9 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -12,11 +12,11 @@
#include <linux/workqueue.h>
#include "dss/omapdss.h"
+#include "dss/dss.h"
#include <drm/drm_gem.h>
#include <drm/omap_drm.h>
-#include "omap_connector.h"
#include "omap_crtc.h"
#include "omap_encoder.h"
#include "omap_fb.h"
@@ -47,7 +47,6 @@ struct omap_drm_private {
struct dss_device *dss;
struct dispc_device *dispc;
- const struct dispc_ops *dispc_ops;
unsigned int num_pipes;
struct omap_drm_pipeline pipes[8];
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 57e92a4d5937..4dd05bc732da 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -75,7 +75,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
struct omap_dss_device *output = omap_encoder->output;
- struct omap_dss_device *dssdev;
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct drm_bridge *bridge;
@@ -98,9 +97,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
*
* A better solution is to use DRM's bus-flags through the whole driver.
*/
- for (dssdev = output; dssdev; dssdev = dssdev->next)
- omap_encoder_update_videomode_flags(&vm, dssdev->bus_flags);
-
for (bridge = output->bridge; bridge;
bridge = drm_bridge_get_next_bridge(bridge)) {
if (!bridge->timings)
@@ -113,65 +109,12 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
bus_flags = connector->display_info.bus_flags;
omap_encoder_update_videomode_flags(&vm, bus_flags);
- /* Set timings for the dss manager. */
+ /* Set timings for all devices in the display pipeline. */
dss_mgr_set_timings(output, &vm);
}
-static void omap_encoder_disable(struct drm_encoder *encoder)
-{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->output;
- struct drm_device *dev = encoder->dev;
-
- dev_dbg(dev->dev, "disable(%s)\n", dssdev->name);
-
- /*
- * Disable the chain of external devices, starting at the one at the
- * internal encoder's output. This is used for DSI outputs only, as
- * dssdev->next is NULL for all other outputs.
- */
- omapdss_device_disable(dssdev->next);
-}
-
-static void omap_encoder_enable(struct drm_encoder *encoder)
-{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->output;
- struct drm_device *dev = encoder->dev;
-
- dev_dbg(dev->dev, "enable(%s)\n", dssdev->name);
-
- /*
- * Enable the chain of external devices, starting at the one at the
- * internal encoder's output. This is used for DSI outputs only, as
- * dssdev->next is NULL for all other outputs.
- */
- omapdss_device_enable(dssdev->next);
-}
-
-static int omap_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- enum drm_mode_status status;
-
- status = omap_connector_mode_fixup(omap_encoder->output,
- &crtc_state->mode,
- &crtc_state->adjusted_mode);
- if (status != MODE_OK) {
- dev_err(encoder->dev->dev, "invalid timings: %d\n", status);
- return -EINVAL;
- }
-
- return 0;
-}
-
static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
.mode_set = omap_encoder_mode_set,
- .disable = omap_encoder_disable,
- .enable = omap_encoder_enable,
- .atomic_check = omap_encoder_atomic_check,
};
/* initialize encoder */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 30d299ca8795..38af6195d959 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1324,8 +1324,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
}
omap_obj->pages = pages;
- ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL,
- npages);
+ ret = drm_prime_sg_to_page_array(sgt, pages, npages);
if (ret) {
omap_gem_free_object(obj);
obj = ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 97c83b959f7e..15148d4b35b5 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -29,7 +29,7 @@ static void omap_irq_update(struct drm_device *dev)
DBG("irqmask=%08x", irqmask);
- priv->dispc_ops->write_irqenable(priv->dispc, irqmask);
+ dispc_write_irqenable(priv->dispc, irqmask);
}
static void omap_irq_wait_handler(struct omap_irq_wait *wait)
@@ -83,7 +83,7 @@ int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable)
unsigned long flags;
enum omap_channel channel = omap_crtc_channel(crtc);
int framedone_irq =
- priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, channel);
+ dispc_mgr_get_framedone_irq(priv->dispc, channel);
DBG("dev=%p, crtc=%u, enable=%d", dev, channel, enable);
@@ -120,7 +120,7 @@ int omap_irq_enable_vblank(struct drm_crtc *crtc)
DBG("dev=%p, crtc=%u", dev, channel);
spin_lock_irqsave(&priv->wait_lock, flags);
- priv->irq_mask |= priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
+ priv->irq_mask |= dispc_mgr_get_vsync_irq(priv->dispc,
channel);
omap_irq_update(dev);
spin_unlock_irqrestore(&priv->wait_lock, flags);
@@ -146,7 +146,7 @@ void omap_irq_disable_vblank(struct drm_crtc *crtc)
DBG("dev=%p, crtc=%u", dev, channel);
spin_lock_irqsave(&priv->wait_lock, flags);
- priv->irq_mask &= ~priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
+ priv->irq_mask &= ~dispc_mgr_get_vsync_irq(priv->dispc,
channel);
omap_irq_update(dev);
spin_unlock_irqrestore(&priv->wait_lock, flags);
@@ -211,9 +211,9 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
unsigned int id;
u32 irqstatus;
- irqstatus = priv->dispc_ops->read_irqstatus(priv->dispc);
- priv->dispc_ops->clear_irqstatus(priv->dispc, irqstatus);
- priv->dispc_ops->read_irqstatus(priv->dispc); /* flush posted write */
+ irqstatus = dispc_read_irqstatus(priv->dispc);
+ dispc_clear_irqstatus(priv->dispc, irqstatus);
+ dispc_read_irqstatus(priv->dispc); /* flush posted write */
VERB("irqs: %08x", irqstatus);
@@ -221,15 +221,15 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
struct drm_crtc *crtc = priv->pipes[id].crtc;
enum omap_channel channel = omap_crtc_channel(crtc);
- if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel)) {
+ if (irqstatus & dispc_mgr_get_vsync_irq(priv->dispc, channel)) {
drm_handle_vblank(dev, id);
omap_crtc_vblank_irq(crtc);
}
- if (irqstatus & priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, channel))
+ if (irqstatus & dispc_mgr_get_sync_lost_irq(priv->dispc, channel))
omap_crtc_error_irq(crtc, irqstatus);
- if (irqstatus & priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, channel))
+ if (irqstatus & dispc_mgr_get_framedone_irq(priv->dispc, channel))
omap_crtc_framedone_irq(crtc, irqstatus);
}
@@ -263,7 +263,7 @@ static const u32 omap_underflow_irqs[] = {
int omap_drm_irq_install(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- unsigned int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
+ unsigned int num_mgrs = dispc_get_num_mgrs(priv->dispc);
unsigned int max_planes;
unsigned int i;
int ret;
@@ -281,13 +281,13 @@ int omap_drm_irq_install(struct drm_device *dev)
}
for (i = 0; i < num_mgrs; ++i)
- priv->irq_mask |= priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, i);
+ priv->irq_mask |= dispc_mgr_get_sync_lost_irq(priv->dispc, i);
- priv->dispc_ops->runtime_get(priv->dispc);
- priv->dispc_ops->clear_irqstatus(priv->dispc, 0xffffffff);
- priv->dispc_ops->runtime_put(priv->dispc);
+ dispc_runtime_get(priv->dispc);
+ dispc_clear_irqstatus(priv->dispc, 0xffffffff);
+ dispc_runtime_put(priv->dispc);
- ret = priv->dispc_ops->request_irq(priv->dispc, omap_irq_handler, dev);
+ ret = dispc_request_irq(priv->dispc, omap_irq_handler, dev);
if (ret < 0)
return ret;
@@ -305,5 +305,5 @@ void omap_drm_irq_uninstall(struct drm_device *dev)
dev->irq_enabled = false;
- priv->dispc_ops->free_irq(priv->dispc, dev);
+ dispc_free_irq(priv->dispc, dev);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 21e0b9785599..51dc24acea73 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -59,6 +59,8 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
info.pre_mult_alpha = 1;
else
info.pre_mult_alpha = 0;
+ info.color_encoding = state->color_encoding;
+ info.color_range = state->color_range;
/* update scanout: */
omap_framebuffer_update_scanout(state->fb, state, &info);
@@ -70,17 +72,17 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
&info.paddr, &info.p_uv_addr);
/* and finally, update omapdss: */
- ret = priv->dispc_ops->ovl_setup(priv->dispc, omap_plane->id, &info,
+ ret = dispc_ovl_setup(priv->dispc, omap_plane->id, &info,
omap_crtc_timings(state->crtc), false,
omap_crtc_channel(state->crtc));
if (ret) {
dev_err(plane->dev->dev, "Failed to setup plane %s\n",
omap_plane->name);
- priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false);
+ dispc_ovl_enable(priv->dispc, omap_plane->id, false);
return;
}
- priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, true);
+ dispc_ovl_enable(priv->dispc, omap_plane->id, true);
}
static void omap_plane_atomic_disable(struct drm_plane *plane,
@@ -93,7 +95,7 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY
? 0 : omap_plane->id;
- priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false);
+ dispc_ovl_enable(priv->dispc, omap_plane->id, false);
}
static int omap_plane_atomic_check(struct drm_plane *plane,
@@ -189,6 +191,8 @@ static void omap_plane_reset(struct drm_plane *plane)
*/
plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY
? 0 : omap_plane->id;
+ plane->state->color_encoding = DRM_COLOR_YCBCR_BT601;
+ plane->state->color_range = DRM_COLOR_YCBCR_FULL_RANGE;
}
static int omap_plane_atomic_set_property(struct drm_plane *plane,
@@ -232,6 +236,22 @@ static const struct drm_plane_funcs omap_plane_funcs = {
.atomic_get_property = omap_plane_atomic_get_property,
};
+static bool omap_plane_supports_yuv(struct drm_plane *plane)
+{
+ struct omap_drm_private *priv = plane->dev->dev_private;
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ const u32 *formats = dispc_ovl_get_color_modes(priv->dispc, omap_plane->id);
+ u32 i;
+
+ for (i = 0; formats[i]; i++)
+ if (formats[i] == DRM_FORMAT_YUYV ||
+ formats[i] == DRM_FORMAT_UYVY ||
+ formats[i] == DRM_FORMAT_NV12)
+ return true;
+
+ return false;
+}
+
static const char *plane_id_to_name[] = {
[OMAP_DSS_GFX] = "gfx",
[OMAP_DSS_VIDEO1] = "vid1",
@@ -252,7 +272,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
u32 possible_crtcs)
{
struct omap_drm_private *priv = dev->dev_private;
- unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc);
+ unsigned int num_planes = dispc_get_num_ovls(priv->dispc);
struct drm_plane *plane;
struct omap_plane *omap_plane;
enum omap_plane_id id;
@@ -271,7 +291,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
if (!omap_plane)
return ERR_PTR(-ENOMEM);
- formats = priv->dispc_ops->ovl_get_color_modes(priv->dispc, id);
+ formats = dispc_ovl_get_color_modes(priv->dispc, id);
for (nformats = 0; formats[nformats]; ++nformats)
;
omap_plane->id = id;
@@ -293,6 +313,15 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
drm_plane_create_blend_mode_property(plane, BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
+ if (omap_plane_supports_yuv(plane))
+ drm_plane_create_color_properties(plane,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_FULL_RANGE);
+
return plane;
error:
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index 9e1acbd2c7aa..8338dc665301 100644
--- a/drivers/gpu/drm/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
@@ -254,6 +254,5 @@ struct tcm *sita_init(u16 width, u16 height)
return tcm;
error:
- kfree(tcm);
return NULL;
}
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index b4e021ea30f9..4894913936e9 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -57,6 +57,15 @@ config DRM_PANEL_BOE_TV101WUM_NL6
Say Y here if you want to support for BOE TV101WUM and AUO KD101N80
45NA WUXGA PANEL DSI Video Mode panel
+config DRM_PANEL_DSI_CM
+ tristate "Generic DSI command mode panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ DRM panel driver for DSI command mode panels with support for
+ embedded and external backlights.
+
config DRM_PANEL_LVDS
tristate "Generic LVDS panel driver"
depends on OF
@@ -145,6 +154,17 @@ config DRM_PANEL_JDI_LT070ME05000
The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses
24 bit per pixel.
+config DRM_PANEL_KHADAS_TS050
+ tristate "Khadas TS050 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Khadas TS050 TFT-LCD
+ panel module. The panel has a 1080x1920 resolution and uses
+ 24 bit RGB per pixel. It provides a MIPI DSI interface to
+ the host, a built-in LED backlight and touch controller.
+
config DRM_PANEL_KINGDISPLAY_KD097D04
tristate "Kingdisplay kd097d04 panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index ebbf488c7eac..cae4d976c069 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
+obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
@@ -13,6 +14,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
+obj-$(CONFIG_DRM_PANEL_KHADAS_TS050) += panel-khadas-ts050.o
obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
new file mode 100644
index 000000000000..af381d756ac1
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic DSI Command Mode panel driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#define DCS_GET_ID1 0xda
+#define DCS_GET_ID2 0xdb
+#define DCS_GET_ID3 0xdc
+
+#define DCS_REGULATOR_SUPPLY_NUM 2
+
+static const struct of_device_id dsicm_of_match[];
+
+struct dsic_panel_data {
+ u32 xres;
+ u32 yres;
+ u32 refresh;
+ u32 width_mm;
+ u32 height_mm;
+ u32 max_hs_rate;
+ u32 max_lp_rate;
+};
+
+struct panel_drv_data {
+ struct mipi_dsi_device *dsi;
+ struct drm_panel panel;
+ struct drm_display_mode mode;
+
+ struct mutex lock;
+
+ struct backlight_device *bldev;
+ struct backlight_device *extbldev;
+
+ unsigned long hw_guard_end; /* next value of jiffies when we can
+ * issue the next sleep in/out command
+ */
+ unsigned long hw_guard_wait; /* max guard time in jiffies */
+
+ const struct dsic_panel_data *panel_data;
+
+ struct gpio_desc *reset_gpio;
+
+ struct regulator_bulk_data supplies[DCS_REGULATOR_SUPPLY_NUM];
+
+ bool use_dsi_backlight;
+
+ /* runtime variables */
+ bool enabled;
+
+ bool intro_printed;
+};
+
+static inline struct panel_drv_data *panel_to_ddata(struct drm_panel *panel)
+{
+ return container_of(panel, struct panel_drv_data, panel);
+}
+
+static void dsicm_bl_power(struct panel_drv_data *ddata, bool enable)
+{
+ struct backlight_device *backlight;
+
+ if (ddata->bldev)
+ backlight = ddata->bldev;
+ else if (ddata->extbldev)
+ backlight = ddata->extbldev;
+ else
+ return;
+
+ if (enable) {
+ backlight->props.fb_blank = FB_BLANK_UNBLANK;
+ backlight->props.state = ~(BL_CORE_FBBLANK | BL_CORE_SUSPENDED);
+ backlight->props.power = FB_BLANK_UNBLANK;
+ } else {
+ backlight->props.fb_blank = FB_BLANK_NORMAL;
+ backlight->props.power = FB_BLANK_POWERDOWN;
+ backlight->props.state |= BL_CORE_FBBLANK | BL_CORE_SUSPENDED;
+ }
+
+ backlight_update_status(backlight);
+}
+
+static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
+{
+ ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
+ ddata->hw_guard_end = jiffies + ddata->hw_guard_wait;
+}
+
+static void hw_guard_wait(struct panel_drv_data *ddata)
+{
+ unsigned long wait = ddata->hw_guard_end - jiffies;
+
+ if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(wait);
+ }
+}
+
+static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
+{
+ return mipi_dsi_dcs_read(ddata->dsi, dcs_cmd, data, 1);
+}
+
+static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
+{
+ return mipi_dsi_dcs_write(ddata->dsi, dcs_cmd, &param, 1);
+}
+
+static int dsicm_sleep_in(struct panel_drv_data *ddata)
+
+{
+ int r;
+
+ hw_guard_wait(ddata);
+
+ r = mipi_dsi_dcs_enter_sleep_mode(ddata->dsi);
+ if (r)
+ return r;
+
+ hw_guard_start(ddata, 120);
+
+ usleep_range(5000, 10000);
+
+ return 0;
+}
+
+static int dsicm_sleep_out(struct panel_drv_data *ddata)
+{
+ int r;
+
+ hw_guard_wait(ddata);
+
+ r = mipi_dsi_dcs_exit_sleep_mode(ddata->dsi);
+ if (r)
+ return r;
+
+ hw_guard_start(ddata, 120);
+
+ usleep_range(5000, 10000);
+
+ return 0;
+}
+
+static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
+{
+ int r;
+
+ r = dsicm_dcs_read_1(ddata, DCS_GET_ID1, id1);
+ if (r)
+ return r;
+ r = dsicm_dcs_read_1(ddata, DCS_GET_ID2, id2);
+ if (r)
+ return r;
+ r = dsicm_dcs_read_1(ddata, DCS_GET_ID3, id3);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int dsicm_set_update_window(struct panel_drv_data *ddata)
+{
+ struct mipi_dsi_device *dsi = ddata->dsi;
+ int r;
+
+ r = mipi_dsi_dcs_set_column_address(dsi, 0, ddata->mode.hdisplay - 1);
+ if (r < 0)
+ return r;
+
+ r = mipi_dsi_dcs_set_page_address(dsi, 0, ddata->mode.vdisplay - 1);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static int dsicm_bl_update_status(struct backlight_device *dev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
+ int r = 0;
+ int level;
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ level = dev->props.brightness;
+ else
+ level = 0;
+
+ dev_dbg(&ddata->dsi->dev, "update brightness to %d\n", level);
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->enabled)
+ r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ level);
+
+ mutex_unlock(&ddata->lock);
+
+ return r;
+}
+
+static int dsicm_bl_get_intensity(struct backlight_device *dev)
+{
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ return dev->props.brightness;
+
+ return 0;
+}
+
+static const struct backlight_ops dsicm_bl_ops = {
+ .get_brightness = dsicm_bl_get_intensity,
+ .update_status = dsicm_bl_update_status,
+};
+
+static ssize_t num_dsi_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+ u8 errors = 0;
+ int r = -ENODEV;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->enabled)
+ r = dsicm_dcs_read_1(ddata, MIPI_DCS_GET_ERROR_COUNT_ON_DSI, &errors);
+
+ mutex_unlock(&ddata->lock);
+
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", errors);
+}
+
+static ssize_t hw_revision_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+ u8 id1, id2, id3;
+ int r = -ENODEV;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->enabled)
+ r = dsicm_get_id(ddata, &id1, &id2, &id3);
+
+ mutex_unlock(&ddata->lock);
+
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
+}
+
+static DEVICE_ATTR_RO(num_dsi_errors);
+static DEVICE_ATTR_RO(hw_revision);
+
+static struct attribute *dsicm_attrs[] = {
+ &dev_attr_num_dsi_errors.attr,
+ &dev_attr_hw_revision.attr,
+ NULL,
+};
+
+static const struct attribute_group dsicm_attr_group = {
+ .attrs = dsicm_attrs,
+};
+
+static void dsicm_hw_reset(struct panel_drv_data *ddata)
+{
+ gpiod_set_value(ddata->reset_gpio, 1);
+ udelay(10);
+ /* reset the panel */
+ gpiod_set_value(ddata->reset_gpio, 0);
+ /* assert reset */
+ udelay(10);
+ gpiod_set_value(ddata->reset_gpio, 1);
+ /* wait after releasing reset */
+ usleep_range(5000, 10000);
+}
+
+static int dsicm_power_on(struct panel_drv_data *ddata)
+{
+ u8 id1, id2, id3;
+ int r;
+
+ dsicm_hw_reset(ddata);
+
+ ddata->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ r = dsicm_sleep_out(ddata);
+ if (r)
+ goto err;
+
+ r = dsicm_get_id(ddata, &id1, &id2, &id3);
+ if (r)
+ goto err;
+
+ r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0xff);
+ if (r)
+ goto err;
+
+ r = dsicm_dcs_write_1(ddata, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ (1<<2) | (1<<5)); /* BL | BCTRL */
+ if (r)
+ goto err;
+
+ r = mipi_dsi_dcs_set_pixel_format(ddata->dsi, MIPI_DCS_PIXEL_FMT_24BIT);
+ if (r)
+ goto err;
+
+ r = dsicm_set_update_window(ddata);
+ if (r)
+ goto err;
+
+ r = mipi_dsi_dcs_set_display_on(ddata->dsi);
+ if (r)
+ goto err;
+
+ r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (r)
+ goto err;
+
+ /* possible panel bug */
+ msleep(100);
+
+ ddata->enabled = true;
+
+ if (!ddata->intro_printed) {
+ dev_info(&ddata->dsi->dev, "panel revision %02x.%02x.%02x\n",
+ id1, id2, id3);
+ ddata->intro_printed = true;
+ }
+
+ ddata->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ return 0;
+err:
+ dev_err(&ddata->dsi->dev, "error while enabling panel, issuing HW reset\n");
+
+ dsicm_hw_reset(ddata);
+
+ return r;
+}
+
+static int dsicm_power_off(struct panel_drv_data *ddata)
+{
+ int r;
+
+ ddata->enabled = false;
+
+ r = mipi_dsi_dcs_set_display_off(ddata->dsi);
+ if (!r)
+ r = dsicm_sleep_in(ddata);
+
+ if (r) {
+ dev_err(&ddata->dsi->dev,
+ "error disabling panel, issuing HW reset\n");
+ dsicm_hw_reset(ddata);
+ }
+
+ return r;
+}
+
+static int dsicm_prepare(struct drm_panel *panel)
+{
+ struct panel_drv_data *ddata = panel_to_ddata(panel);
+ int r;
+
+ r = regulator_bulk_enable(ARRAY_SIZE(ddata->supplies), ddata->supplies);
+ if (r)
+ dev_err(&ddata->dsi->dev, "failed to enable supplies: %d\n", r);
+
+ return r;
+}
+
+static int dsicm_enable(struct drm_panel *panel)
+{
+ struct panel_drv_data *ddata = panel_to_ddata(panel);
+ int r;
+
+ mutex_lock(&ddata->lock);
+
+ r = dsicm_power_on(ddata);
+ if (r)
+ goto err;
+
+ mutex_unlock(&ddata->lock);
+
+ dsicm_bl_power(ddata, true);
+
+ return 0;
+err:
+ dev_err(&ddata->dsi->dev, "enable failed (%d)\n", r);
+ mutex_unlock(&ddata->lock);
+ return r;
+}
+
+static int dsicm_unprepare(struct drm_panel *panel)
+{
+ struct panel_drv_data *ddata = panel_to_ddata(panel);
+ int r;
+
+ r = regulator_bulk_disable(ARRAY_SIZE(ddata->supplies), ddata->supplies);
+ if (r)
+ dev_err(&ddata->dsi->dev, "failed to disable supplies: %d\n", r);
+
+ return r;
+}
+
+static int dsicm_disable(struct drm_panel *panel)
+{
+ struct panel_drv_data *ddata = panel_to_ddata(panel);
+ int r;
+
+ dsicm_bl_power(ddata, false);
+
+ mutex_lock(&ddata->lock);
+
+ r = dsicm_power_off(ddata);
+
+ mutex_unlock(&ddata->lock);
+
+ return r;
+}
+
+static int dsicm_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct panel_drv_data *ddata = panel_to_ddata(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &ddata->mode);
+ if (!mode) {
+ dev_err(&ddata->dsi->dev, "failed to add mode %ux%u@%u kHz\n",
+ ddata->mode.hdisplay, ddata->mode.vdisplay,
+ ddata->mode.clock);
+ return -ENOMEM;
+ }
+
+ connector->display_info.width_mm = ddata->panel_data->width_mm;
+ connector->display_info.height_mm = ddata->panel_data->height_mm;
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs dsicm_panel_funcs = {
+ .unprepare = dsicm_unprepare,
+ .disable = dsicm_disable,
+ .prepare = dsicm_prepare,
+ .enable = dsicm_enable,
+ .get_modes = dsicm_get_modes,
+};
+
+static int dsicm_probe_of(struct mipi_dsi_device *dsi)
+{
+ struct backlight_device *backlight;
+ struct panel_drv_data *ddata = mipi_dsi_get_drvdata(dsi);
+ int err;
+ struct drm_display_mode *mode = &ddata->mode;
+
+ ddata->reset_gpio = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ddata->reset_gpio)) {
+ err = PTR_ERR(ddata->reset_gpio);
+ dev_err(&dsi->dev, "reset gpio request failed: %d", err);
+ return err;
+ }
+
+ mode->hdisplay = mode->hsync_start = mode->hsync_end = mode->htotal =
+ ddata->panel_data->xres;
+ mode->vdisplay = mode->vsync_start = mode->vsync_end = mode->vtotal =
+ ddata->panel_data->yres;
+ mode->clock = ddata->panel_data->xres * ddata->panel_data->yres *
+ ddata->panel_data->refresh / 1000;
+ mode->width_mm = ddata->panel_data->width_mm;
+ mode->height_mm = ddata->panel_data->height_mm;
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+
+ ddata->supplies[0].supply = "vpnl";
+ ddata->supplies[1].supply = "vddi";
+ err = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ddata->supplies),
+ ddata->supplies);
+ if (err)
+ return err;
+
+ backlight = devm_of_find_backlight(&dsi->dev);
+ if (IS_ERR(backlight))
+ return PTR_ERR(backlight);
+
+ /* If no backlight device is found, assume native backlight support */
+ if (backlight)
+ ddata->extbldev = backlight;
+ else
+ ddata->use_dsi_backlight = true;
+
+ return 0;
+}
+
+static int dsicm_probe(struct mipi_dsi_device *dsi)
+{
+ struct panel_drv_data *ddata;
+ struct backlight_device *bldev = NULL;
+ struct device *dev = &dsi->dev;
+ int r;
+
+ dev_dbg(dev, "probe\n");
+
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ddata);
+ ddata->dsi = dsi;
+
+ ddata->panel_data = of_device_get_match_data(dev);
+ if (!ddata->panel_data)
+ return -ENODEV;
+
+ r = dsicm_probe_of(dsi);
+ if (r)
+ return r;
+
+ mutex_init(&ddata->lock);
+
+ dsicm_hw_reset(ddata);
+
+ drm_panel_init(&ddata->panel, dev, &dsicm_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (ddata->use_dsi_backlight) {
+ struct backlight_properties props = { 0 };
+ props.max_brightness = 255;
+ props.type = BACKLIGHT_RAW;
+
+ bldev = devm_backlight_device_register(dev, dev_name(dev),
+ dev, ddata, &dsicm_bl_ops, &props);
+ if (IS_ERR(bldev)) {
+ r = PTR_ERR(bldev);
+ goto err_bl;
+ }
+
+ ddata->bldev = bldev;
+ }
+
+ r = sysfs_create_group(&dev->kobj, &dsicm_attr_group);
+ if (r) {
+ dev_err(dev, "failed to create sysfs files\n");
+ goto err_bl;
+ }
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_EOT_PACKET;
+ dsi->hs_rate = ddata->panel_data->max_hs_rate;
+ dsi->lp_rate = ddata->panel_data->max_lp_rate;
+
+ drm_panel_add(&ddata->panel);
+
+ r = mipi_dsi_attach(dsi);
+ if (r < 0)
+ goto err_dsi_attach;
+
+ return 0;
+
+err_dsi_attach:
+ drm_panel_remove(&ddata->panel);
+ sysfs_remove_group(&dsi->dev.kobj, &dsicm_attr_group);
+err_bl:
+ if (ddata->extbldev)
+ put_device(&ddata->extbldev->dev);
+
+ return r;
+}
+
+static int dsicm_remove(struct mipi_dsi_device *dsi)
+{
+ struct panel_drv_data *ddata = mipi_dsi_get_drvdata(dsi);
+
+ dev_dbg(&dsi->dev, "remove\n");
+
+ mipi_dsi_detach(dsi);
+
+ drm_panel_remove(&ddata->panel);
+
+ sysfs_remove_group(&dsi->dev.kobj, &dsicm_attr_group);
+
+ if (ddata->extbldev)
+ put_device(&ddata->extbldev->dev);
+
+ return 0;
+}
+
+static const struct dsic_panel_data taal_data = {
+ .xres = 864,
+ .yres = 480,
+ .refresh = 60,
+ .width_mm = 0,
+ .height_mm = 0,
+ .max_hs_rate = 300000000,
+ .max_lp_rate = 10000000,
+};
+
+static const struct dsic_panel_data himalaya_data = {
+ .xres = 480,
+ .yres = 864,
+ .refresh = 60,
+ .width_mm = 49,
+ .height_mm = 88,
+ .max_hs_rate = 300000000,
+ .max_lp_rate = 10000000,
+};
+
+static const struct dsic_panel_data droid4_data = {
+ .xres = 540,
+ .yres = 960,
+ .refresh = 60,
+ .width_mm = 50,
+ .height_mm = 89,
+ .max_hs_rate = 300000000,
+ .max_lp_rate = 10000000,
+};
+
+static const struct of_device_id dsicm_of_match[] = {
+ { .compatible = "tpo,taal", .data = &taal_data },
+ { .compatible = "nokia,himalaya", &himalaya_data },
+ { .compatible = "motorola,droid4-panel", &droid4_data },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, dsicm_of_match);
+
+static struct mipi_dsi_driver dsicm_driver = {
+ .probe = dsicm_probe,
+ .remove = dsicm_remove,
+ .driver = {
+ .name = "panel-dsi-cm",
+ .of_match_table = dsicm_of_match,
+ },
+};
+module_mipi_dsi_driver(dsicm_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
+MODULE_DESCRIPTION("Generic DSI Command Mode Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c
new file mode 100644
index 000000000000..8f6ac1a40c31
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c
@@ -0,0 +1,870 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 BayLibre, SAS
+ * Author: Neil Armstrong <[email protected]>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct khadas_ts050_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *link;
+
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *enable_gpio;
+
+ bool prepared;
+ bool enabled;
+};
+
+struct khadas_ts050_panel_cmd {
+ u8 cmd;
+ u8 data;
+};
+
+/* Only the CMD1 User Command set is documented */
+static const struct khadas_ts050_panel_cmd init_code[] = {
+ /* Select Unknown CMD Page (Undocumented) */
+ {0xff, 0xee},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ {0x1f, 0x45},
+ {0x24, 0x4f},
+ {0x38, 0xc8},
+ {0x39, 0x27},
+ {0x1e, 0x77},
+ {0x1d, 0x0f},
+ {0x7e, 0x71},
+ {0x7c, 0x03},
+ {0xff, 0x00},
+ {0xfb, 0x01},
+ {0x35, 0x01},
+ /* Select CMD2 Page0 (Undocumented) */
+ {0xff, 0x01},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ {0x00, 0x01},
+ {0x01, 0x55},
+ {0x02, 0x40},
+ {0x05, 0x40},
+ {0x06, 0x4a},
+ {0x07, 0x24},
+ {0x08, 0x0c},
+ {0x0b, 0x7d},
+ {0x0c, 0x7d},
+ {0x0e, 0xb0},
+ {0x0f, 0xae},
+ {0x11, 0x10},
+ {0x12, 0x10},
+ {0x13, 0x03},
+ {0x14, 0x4a},
+ {0x15, 0x12},
+ {0x16, 0x12},
+ {0x18, 0x00},
+ {0x19, 0x77},
+ {0x1a, 0x55},
+ {0x1b, 0x13},
+ {0x1c, 0x00},
+ {0x1d, 0x00},
+ {0x1e, 0x13},
+ {0x1f, 0x00},
+ {0x23, 0x00},
+ {0x24, 0x00},
+ {0x25, 0x00},
+ {0x26, 0x00},
+ {0x27, 0x00},
+ {0x28, 0x00},
+ {0x35, 0x00},
+ {0x66, 0x00},
+ {0x58, 0x82},
+ {0x59, 0x02},
+ {0x5a, 0x02},
+ {0x5b, 0x02},
+ {0x5c, 0x82},
+ {0x5d, 0x82},
+ {0x5e, 0x02},
+ {0x5f, 0x02},
+ {0x72, 0x31},
+ /* Select CMD2 Page4 (Undocumented) */
+ {0xff, 0x05},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ {0x00, 0x01},
+ {0x01, 0x0b},
+ {0x02, 0x0c},
+ {0x03, 0x09},
+ {0x04, 0x0a},
+ {0x05, 0x00},
+ {0x06, 0x0f},
+ {0x07, 0x10},
+ {0x08, 0x00},
+ {0x09, 0x00},
+ {0x0a, 0x00},
+ {0x0b, 0x00},
+ {0x0c, 0x00},
+ {0x0d, 0x13},
+ {0x0e, 0x15},
+ {0x0f, 0x17},
+ {0x10, 0x01},
+ {0x11, 0x0b},
+ {0x12, 0x0c},
+ {0x13, 0x09},
+ {0x14, 0x0a},
+ {0x15, 0x00},
+ {0x16, 0x0f},
+ {0x17, 0x10},
+ {0x18, 0x00},
+ {0x19, 0x00},
+ {0x1a, 0x00},
+ {0x1b, 0x00},
+ {0x1c, 0x00},
+ {0x1d, 0x13},
+ {0x1e, 0x15},
+ {0x1f, 0x17},
+ {0x20, 0x00},
+ {0x21, 0x03},
+ {0x22, 0x01},
+ {0x23, 0x40},
+ {0x24, 0x40},
+ {0x25, 0xed},
+ {0x29, 0x58},
+ {0x2a, 0x12},
+ {0x2b, 0x01},
+ {0x4b, 0x06},
+ {0x4c, 0x11},
+ {0x4d, 0x20},
+ {0x4e, 0x02},
+ {0x4f, 0x02},
+ {0x50, 0x20},
+ {0x51, 0x61},
+ {0x52, 0x01},
+ {0x53, 0x63},
+ {0x54, 0x77},
+ {0x55, 0xed},
+ {0x5b, 0x00},
+ {0x5c, 0x00},
+ {0x5d, 0x00},
+ {0x5e, 0x00},
+ {0x5f, 0x15},
+ {0x60, 0x75},
+ {0x61, 0x00},
+ {0x62, 0x00},
+ {0x63, 0x00},
+ {0x64, 0x00},
+ {0x65, 0x00},
+ {0x66, 0x00},
+ {0x67, 0x00},
+ {0x68, 0x04},
+ {0x69, 0x00},
+ {0x6a, 0x00},
+ {0x6c, 0x40},
+ {0x75, 0x01},
+ {0x76, 0x01},
+ {0x7a, 0x80},
+ {0x7b, 0xa3},
+ {0x7c, 0xd8},
+ {0x7d, 0x60},
+ {0x7f, 0x15},
+ {0x80, 0x81},
+ {0x83, 0x05},
+ {0x93, 0x08},
+ {0x94, 0x10},
+ {0x8a, 0x00},
+ {0x9b, 0x0f},
+ {0xea, 0xff},
+ {0xec, 0x00},
+ /* Select CMD2 Page0 (Undocumented) */
+ {0xff, 0x01},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ {0x75, 0x00},
+ {0x76, 0xdf},
+ {0x77, 0x00},
+ {0x78, 0xe4},
+ {0x79, 0x00},
+ {0x7a, 0xed},
+ {0x7b, 0x00},
+ {0x7c, 0xf6},
+ {0x7d, 0x00},
+ {0x7e, 0xff},
+ {0x7f, 0x01},
+ {0x80, 0x07},
+ {0x81, 0x01},
+ {0x82, 0x10},
+ {0x83, 0x01},
+ {0x84, 0x18},
+ {0x85, 0x01},
+ {0x86, 0x20},
+ {0x87, 0x01},
+ {0x88, 0x3d},
+ {0x89, 0x01},
+ {0x8a, 0x56},
+ {0x8b, 0x01},
+ {0x8c, 0x84},
+ {0x8d, 0x01},
+ {0x8e, 0xab},
+ {0x8f, 0x01},
+ {0x90, 0xec},
+ {0x91, 0x02},
+ {0x92, 0x22},
+ {0x93, 0x02},
+ {0x94, 0x23},
+ {0x95, 0x02},
+ {0x96, 0x55},
+ {0x97, 0x02},
+ {0x98, 0x8b},
+ {0x99, 0x02},
+ {0x9a, 0xaf},
+ {0x9b, 0x02},
+ {0x9c, 0xdf},
+ {0x9d, 0x03},
+ {0x9e, 0x01},
+ {0x9f, 0x03},
+ {0xa0, 0x2c},
+ {0xa2, 0x03},
+ {0xa3, 0x39},
+ {0xa4, 0x03},
+ {0xa5, 0x47},
+ {0xa6, 0x03},
+ {0xa7, 0x56},
+ {0xa9, 0x03},
+ {0xaa, 0x66},
+ {0xab, 0x03},
+ {0xac, 0x76},
+ {0xad, 0x03},
+ {0xae, 0x85},
+ {0xaf, 0x03},
+ {0xb0, 0x90},
+ {0xb1, 0x03},
+ {0xb2, 0xcb},
+ {0xb3, 0x00},
+ {0xb4, 0xdf},
+ {0xb5, 0x00},
+ {0xb6, 0xe4},
+ {0xb7, 0x00},
+ {0xb8, 0xed},
+ {0xb9, 0x00},
+ {0xba, 0xf6},
+ {0xbb, 0x00},
+ {0xbc, 0xff},
+ {0xbd, 0x01},
+ {0xbe, 0x07},
+ {0xbf, 0x01},
+ {0xc0, 0x10},
+ {0xc1, 0x01},
+ {0xc2, 0x18},
+ {0xc3, 0x01},
+ {0xc4, 0x20},
+ {0xc5, 0x01},
+ {0xc6, 0x3d},
+ {0xc7, 0x01},
+ {0xc8, 0x56},
+ {0xc9, 0x01},
+ {0xca, 0x84},
+ {0xcb, 0x01},
+ {0xcc, 0xab},
+ {0xcd, 0x01},
+ {0xce, 0xec},
+ {0xcf, 0x02},
+ {0xd0, 0x22},
+ {0xd1, 0x02},
+ {0xd2, 0x23},
+ {0xd3, 0x02},
+ {0xd4, 0x55},
+ {0xd5, 0x02},
+ {0xd6, 0x8b},
+ {0xd7, 0x02},
+ {0xd8, 0xaf},
+ {0xd9, 0x02},
+ {0xda, 0xdf},
+ {0xdb, 0x03},
+ {0xdc, 0x01},
+ {0xdd, 0x03},
+ {0xde, 0x2c},
+ {0xdf, 0x03},
+ {0xe0, 0x39},
+ {0xe1, 0x03},
+ {0xe2, 0x47},
+ {0xe3, 0x03},
+ {0xe4, 0x56},
+ {0xe5, 0x03},
+ {0xe6, 0x66},
+ {0xe7, 0x03},
+ {0xe8, 0x76},
+ {0xe9, 0x03},
+ {0xea, 0x85},
+ {0xeb, 0x03},
+ {0xec, 0x90},
+ {0xed, 0x03},
+ {0xee, 0xcb},
+ {0xef, 0x00},
+ {0xf0, 0xbb},
+ {0xf1, 0x00},
+ {0xf2, 0xc0},
+ {0xf3, 0x00},
+ {0xf4, 0xcc},
+ {0xf5, 0x00},
+ {0xf6, 0xd6},
+ {0xf7, 0x00},
+ {0xf8, 0xe1},
+ {0xf9, 0x00},
+ {0xfa, 0xea},
+ /* Select CMD2 Page2 (Undocumented) */
+ {0xff, 0x02},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ {0x00, 0x00},
+ {0x01, 0xf4},
+ {0x02, 0x00},
+ {0x03, 0xef},
+ {0x04, 0x01},
+ {0x05, 0x07},
+ {0x06, 0x01},
+ {0x07, 0x28},
+ {0x08, 0x01},
+ {0x09, 0x44},
+ {0x0a, 0x01},
+ {0x0b, 0x76},
+ {0x0c, 0x01},
+ {0x0d, 0xa0},
+ {0x0e, 0x01},
+ {0x0f, 0xe7},
+ {0x10, 0x02},
+ {0x11, 0x1f},
+ {0x12, 0x02},
+ {0x13, 0x22},
+ {0x14, 0x02},
+ {0x15, 0x54},
+ {0x16, 0x02},
+ {0x17, 0x8b},
+ {0x18, 0x02},
+ {0x19, 0xaf},
+ {0x1a, 0x02},
+ {0x1b, 0xe0},
+ {0x1c, 0x03},
+ {0x1d, 0x01},
+ {0x1e, 0x03},
+ {0x1f, 0x2d},
+ {0x20, 0x03},
+ {0x21, 0x39},
+ {0x22, 0x03},
+ {0x23, 0x47},
+ {0x24, 0x03},
+ {0x25, 0x57},
+ {0x26, 0x03},
+ {0x27, 0x65},
+ {0x28, 0x03},
+ {0x29, 0x77},
+ {0x2a, 0x03},
+ {0x2b, 0x85},
+ {0x2d, 0x03},
+ {0x2f, 0x8f},
+ {0x30, 0x03},
+ {0x31, 0xcb},
+ {0x32, 0x00},
+ {0x33, 0xbb},
+ {0x34, 0x00},
+ {0x35, 0xc0},
+ {0x36, 0x00},
+ {0x37, 0xcc},
+ {0x38, 0x00},
+ {0x39, 0xd6},
+ {0x3a, 0x00},
+ {0x3b, 0xe1},
+ {0x3d, 0x00},
+ {0x3f, 0xea},
+ {0x40, 0x00},
+ {0x41, 0xf4},
+ {0x42, 0x00},
+ {0x43, 0xfe},
+ {0x44, 0x01},
+ {0x45, 0x07},
+ {0x46, 0x01},
+ {0x47, 0x28},
+ {0x48, 0x01},
+ {0x49, 0x44},
+ {0x4a, 0x01},
+ {0x4b, 0x76},
+ {0x4c, 0x01},
+ {0x4d, 0xa0},
+ {0x4e, 0x01},
+ {0x4f, 0xe7},
+ {0x50, 0x02},
+ {0x51, 0x1f},
+ {0x52, 0x02},
+ {0x53, 0x22},
+ {0x54, 0x02},
+ {0x55, 0x54},
+ {0x56, 0x02},
+ {0x58, 0x8b},
+ {0x59, 0x02},
+ {0x5a, 0xaf},
+ {0x5b, 0x02},
+ {0x5c, 0xe0},
+ {0x5d, 0x03},
+ {0x5e, 0x01},
+ {0x5f, 0x03},
+ {0x60, 0x2d},
+ {0x61, 0x03},
+ {0x62, 0x39},
+ {0x63, 0x03},
+ {0x64, 0x47},
+ {0x65, 0x03},
+ {0x66, 0x57},
+ {0x67, 0x03},
+ {0x68, 0x65},
+ {0x69, 0x03},
+ {0x6a, 0x77},
+ {0x6b, 0x03},
+ {0x6c, 0x85},
+ {0x6d, 0x03},
+ {0x6e, 0x8f},
+ {0x6f, 0x03},
+ {0x70, 0xcb},
+ {0x71, 0x00},
+ {0x72, 0x00},
+ {0x73, 0x00},
+ {0x74, 0x21},
+ {0x75, 0x00},
+ {0x76, 0x4c},
+ {0x77, 0x00},
+ {0x78, 0x6b},
+ {0x79, 0x00},
+ {0x7a, 0x85},
+ {0x7b, 0x00},
+ {0x7c, 0x9a},
+ {0x7d, 0x00},
+ {0x7e, 0xad},
+ {0x7f, 0x00},
+ {0x80, 0xbe},
+ {0x81, 0x00},
+ {0x82, 0xcd},
+ {0x83, 0x01},
+ {0x84, 0x01},
+ {0x85, 0x01},
+ {0x86, 0x29},
+ {0x87, 0x01},
+ {0x88, 0x68},
+ {0x89, 0x01},
+ {0x8a, 0x98},
+ {0x8b, 0x01},
+ {0x8c, 0xe5},
+ {0x8d, 0x02},
+ {0x8e, 0x1e},
+ {0x8f, 0x02},
+ {0x90, 0x30},
+ {0x91, 0x02},
+ {0x92, 0x52},
+ {0x93, 0x02},
+ {0x94, 0x88},
+ {0x95, 0x02},
+ {0x96, 0xaa},
+ {0x97, 0x02},
+ {0x98, 0xd7},
+ {0x99, 0x02},
+ {0x9a, 0xf7},
+ {0x9b, 0x03},
+ {0x9c, 0x21},
+ {0x9d, 0x03},
+ {0x9e, 0x2e},
+ {0x9f, 0x03},
+ {0xa0, 0x3d},
+ {0xa2, 0x03},
+ {0xa3, 0x4c},
+ {0xa4, 0x03},
+ {0xa5, 0x5e},
+ {0xa6, 0x03},
+ {0xa7, 0x71},
+ {0xa9, 0x03},
+ {0xaa, 0x86},
+ {0xab, 0x03},
+ {0xac, 0x94},
+ {0xad, 0x03},
+ {0xae, 0xfa},
+ {0xaf, 0x00},
+ {0xb0, 0x00},
+ {0xb1, 0x00},
+ {0xb2, 0x21},
+ {0xb3, 0x00},
+ {0xb4, 0x4c},
+ {0xb5, 0x00},
+ {0xb6, 0x6b},
+ {0xb7, 0x00},
+ {0xb8, 0x85},
+ {0xb9, 0x00},
+ {0xba, 0x9a},
+ {0xbb, 0x00},
+ {0xbc, 0xad},
+ {0xbd, 0x00},
+ {0xbe, 0xbe},
+ {0xbf, 0x00},
+ {0xc0, 0xcd},
+ {0xc1, 0x01},
+ {0xc2, 0x01},
+ {0xc3, 0x01},
+ {0xc4, 0x29},
+ {0xc5, 0x01},
+ {0xc6, 0x68},
+ {0xc7, 0x01},
+ {0xc8, 0x98},
+ {0xc9, 0x01},
+ {0xca, 0xe5},
+ {0xcb, 0x02},
+ {0xcc, 0x1e},
+ {0xcd, 0x02},
+ {0xce, 0x20},
+ {0xcf, 0x02},
+ {0xd0, 0x52},
+ {0xd1, 0x02},
+ {0xd2, 0x88},
+ {0xd3, 0x02},
+ {0xd4, 0xaa},
+ {0xd5, 0x02},
+ {0xd6, 0xd7},
+ {0xd7, 0x02},
+ {0xd8, 0xf7},
+ {0xd9, 0x03},
+ {0xda, 0x21},
+ {0xdb, 0x03},
+ {0xdc, 0x2e},
+ {0xdd, 0x03},
+ {0xde, 0x3d},
+ {0xdf, 0x03},
+ {0xe0, 0x4c},
+ {0xe1, 0x03},
+ {0xe2, 0x5e},
+ {0xe3, 0x03},
+ {0xe4, 0x71},
+ {0xe5, 0x03},
+ {0xe6, 0x86},
+ {0xe7, 0x03},
+ {0xe8, 0x94},
+ {0xe9, 0x03},
+ {0xea, 0xfa},
+ /* Select CMD2 Page0 (Undocumented) */
+ {0xff, 0x01},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ /* Select CMD2 Page1 (Undocumented) */
+ {0xff, 0x02},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ /* Select CMD2 Page3 (Undocumented) */
+ {0xff, 0x04},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ /* Select CMD1 */
+ {0xff, 0x00},
+ {0xd3, 0x05}, /* RGBMIPICTRL: VSYNC back porch = 5 */
+ {0xd4, 0x04}, /* RGBMIPICTRL: VSYNC front porch = 4 */
+};
+
+static inline
+struct khadas_ts050_panel *to_khadas_ts050_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct khadas_ts050_panel, base);
+}
+
+static int khadas_ts050_panel_prepare(struct drm_panel *panel)
+{
+ struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+ unsigned int i;
+ int err;
+
+ if (khadas_ts050->prepared)
+ return 0;
+
+ gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0);
+
+ err = regulator_enable(khadas_ts050->supply);
+ if (err < 0)
+ return err;
+
+ gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 1);
+
+ msleep(60);
+
+ gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1);
+
+ usleep_range(10000, 11000);
+
+ gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 0);
+
+ /* Select CMD2 page 4 (Undocumented) */
+ mipi_dsi_dcs_write(khadas_ts050->link, 0xff, (u8[]){ 0x05 }, 1);
+
+ /* Reload CMD1: Don't reload default value to register */
+ mipi_dsi_dcs_write(khadas_ts050->link, 0xfb, (u8[]){ 0x01 }, 1);
+
+ mipi_dsi_dcs_write(khadas_ts050->link, 0xc5, (u8[]){ 0x01 }, 1);
+
+ msleep(100);
+
+ for (i = 0; i < ARRAY_SIZE(init_code); i++) {
+ err = mipi_dsi_dcs_write(khadas_ts050->link,
+ init_code[i].cmd,
+ &init_code[i].data, 1);
+ if (err < 0) {
+ dev_err(panel->dev, "failed write cmds: %d\n", err);
+ goto poweroff;
+ }
+ }
+
+ err = mipi_dsi_dcs_exit_sleep_mode(khadas_ts050->link);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
+ goto poweroff;
+ }
+
+ msleep(120);
+
+ /* Select CMD1 */
+ mipi_dsi_dcs_write(khadas_ts050->link, 0xff, (u8[]){ 0x00 }, 1);
+
+ err = mipi_dsi_dcs_set_tear_on(khadas_ts050->link,
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set tear on: %d\n", err);
+ goto poweroff;
+ }
+
+ err = mipi_dsi_dcs_set_display_on(khadas_ts050->link);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set display on: %d\n", err);
+ goto poweroff;
+ }
+
+ usleep_range(10000, 11000);
+
+ khadas_ts050->prepared = true;
+
+ return 0;
+
+poweroff:
+ gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0);
+ gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1);
+
+ regulator_disable(khadas_ts050->supply);
+
+ return err;
+}
+
+static int khadas_ts050_panel_unprepare(struct drm_panel *panel)
+{
+ struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+ int err;
+
+ if (!khadas_ts050->prepared)
+ return 0;
+
+ khadas_ts050->prepared = false;
+
+ err = mipi_dsi_dcs_enter_sleep_mode(khadas_ts050->link);
+ if (err < 0)
+ dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);
+
+ msleep(150);
+
+ gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0);
+ gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1);
+
+ err = regulator_disable(khadas_ts050->supply);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int khadas_ts050_panel_enable(struct drm_panel *panel)
+{
+ struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+
+ khadas_ts050->enabled = true;
+
+ return 0;
+}
+
+static int khadas_ts050_panel_disable(struct drm_panel *panel)
+{
+ struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+ int err;
+
+ if (!khadas_ts050->enabled)
+ return 0;
+
+ err = mipi_dsi_dcs_set_display_off(khadas_ts050->link);
+ if (err < 0)
+ dev_err(panel->dev, "failed to set display off: %d\n", err);
+
+ usleep_range(10000, 11000);
+
+ khadas_ts050->enabled = false;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 120000,
+ .hdisplay = 1088,
+ .hsync_start = 1088 + 104,
+ .hsync_end = 1088 + 104 + 4,
+ .htotal = 1088 + 104 + 4 + 127,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 4,
+ .vsync_end = 1920 + 4 + 2,
+ .vtotal = 1920 + 4 + 2 + 3,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static int khadas_ts050_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode));
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = 64;
+ connector->display_info.height_mm = 118;
+ connector->display_info.bpc = 8;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs khadas_ts050_panel_funcs = {
+ .prepare = khadas_ts050_panel_prepare,
+ .unprepare = khadas_ts050_panel_unprepare,
+ .enable = khadas_ts050_panel_enable,
+ .disable = khadas_ts050_panel_disable,
+ .get_modes = khadas_ts050_panel_get_modes,
+};
+
+static const struct of_device_id khadas_ts050_of_match[] = {
+ { .compatible = "khadas,ts050", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, khadas_ts050_of_match);
+
+static int khadas_ts050_panel_add(struct khadas_ts050_panel *khadas_ts050)
+{
+ struct device *dev = &khadas_ts050->link->dev;
+ int err;
+
+ khadas_ts050->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(khadas_ts050->supply))
+ return dev_err_probe(dev, PTR_ERR(khadas_ts050->supply),
+ "failed to get power supply");
+
+ khadas_ts050->reset_gpio = devm_gpiod_get(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(khadas_ts050->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(khadas_ts050->reset_gpio),
+ "failed to get reset gpio");
+
+ khadas_ts050->enable_gpio = devm_gpiod_get(dev, "enable",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(khadas_ts050->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(khadas_ts050->enable_gpio),
+ "failed to get enable gpio");
+
+ drm_panel_init(&khadas_ts050->base, &khadas_ts050->link->dev,
+ &khadas_ts050_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ err = drm_panel_of_backlight(&khadas_ts050->base);
+ if (err)
+ return err;
+
+ drm_panel_add(&khadas_ts050->base);
+
+ return 0;
+}
+
+static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct khadas_ts050_panel *khadas_ts050;
+ int err;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+ khadas_ts050 = devm_kzalloc(&dsi->dev, sizeof(*khadas_ts050),
+ GFP_KERNEL);
+ if (!khadas_ts050)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, khadas_ts050);
+ khadas_ts050->link = dsi;
+
+ err = khadas_ts050_panel_add(khadas_ts050);
+ if (err < 0)
+ return err;
+
+ err = mipi_dsi_attach(dsi);
+ if (err)
+ drm_panel_remove(&khadas_ts050->base);
+
+ return err;
+}
+
+static int khadas_ts050_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct khadas_ts050_panel *khadas_ts050 = mipi_dsi_get_drvdata(dsi);
+ int err;
+
+ err = mipi_dsi_detach(dsi);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+ drm_panel_remove(&khadas_ts050->base);
+ drm_panel_disable(&khadas_ts050->base);
+ drm_panel_unprepare(&khadas_ts050->base);
+
+ return 0;
+}
+
+static void khadas_ts050_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct khadas_ts050_panel *khadas_ts050 = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_disable(&khadas_ts050->base);
+ drm_panel_unprepare(&khadas_ts050->base);
+}
+
+static struct mipi_dsi_driver khadas_ts050_panel_driver = {
+ .driver = {
+ .name = "panel-khadas-ts050",
+ .of_match_table = khadas_ts050_of_match,
+ },
+ .probe = khadas_ts050_panel_probe,
+ .remove = khadas_ts050_panel_remove,
+ .shutdown = khadas_ts050_panel_shutdown,
+};
+module_mipi_dsi_driver(khadas_ts050_panel_driver);
+
+MODULE_AUTHOR("Neil Armstrong <[email protected]>");
+MODULE_DESCRIPTION("Khadas TS050 panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 0c5f22e95c2d..30f28ad4df6b 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
@@ -22,6 +23,7 @@
/* Manufacturer specific commands sent via DSI */
#define MANTIX_CMD_OTP_STOP_RELOAD_MIPI 0x41
#define MANTIX_CMD_INT_CANCEL 0x4C
+#define MANTIX_CMD_SPI_FINISH 0x90
struct mantix {
struct device *dev;
@@ -33,6 +35,8 @@ struct mantix {
struct regulator *avdd;
struct regulator *avee;
struct regulator *vddi;
+
+ const struct drm_display_mode *default_mode;
};
static inline struct mantix *panel_to_mantix(struct drm_panel *panel)
@@ -66,6 +70,10 @@ static int mantix_init_sequence(struct mantix *ctx)
dsi_generic_write_seq(dsi, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
msleep(20);
+ dsi_generic_write_seq(dsi, MANTIX_CMD_SPI_FINISH, 0xA5);
+ dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2F);
+ msleep(20);
+
dev_dbg(dev, "Panel init sequence done\n");
return 0;
}
@@ -182,7 +190,7 @@ static int mantix_prepare(struct drm_panel *panel)
return 0;
}
-static const struct drm_display_mode default_mode = {
+static const struct drm_display_mode default_mode_mantix = {
.hdisplay = 720,
.hsync_start = 720 + 45,
.hsync_end = 720 + 45 + 14,
@@ -197,17 +205,32 @@ static const struct drm_display_mode default_mode = {
.height_mm = 130,
};
+static const struct drm_display_mode default_mode_ys = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 45,
+ .hsync_end = 720 + 45 + 14,
+ .htotal = 720 + 45 + 14 + 25,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 175,
+ .vsync_end = 1440 + 175 + 8,
+ .vtotal = 1440 + 175 + 8 + 50,
+ .clock = 85298,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 65,
+ .height_mm = 130,
+};
+
static int mantix_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct mantix *ctx = panel_to_mantix(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, ctx->default_mode);
if (!mode) {
dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
+ ctx->default_mode->hdisplay, ctx->default_mode->vdisplay,
+ drm_mode_vrefresh(ctx->default_mode));
return -ENOMEM;
}
@@ -238,6 +261,7 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ ctx->default_mode = of_device_get_match_data(dev);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio)) {
@@ -288,8 +312,8 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
}
dev_info(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode),
+ ctx->default_mode->hdisplay, ctx->default_mode->vdisplay,
+ drm_mode_vrefresh(ctx->default_mode),
mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
return 0;
@@ -316,7 +340,8 @@ static int mantix_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id mantix_of_match[] = {
- { .compatible = "mantix,mlaf057we51-x" },
+ { .compatible = "mantix,mlaf057we51-x", .data = &default_mode_mantix },
+ { .compatible = "ys,ys57pss36bh5gq", .data = &default_mode_ys },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mantix_of_match);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 6b4e97bfd46e..603c5dfe8768 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -25,6 +25,14 @@
/* Manufacturer Command Set */
#define MCS_ELVSS_ON 0xb1
#define MCS_TEMP_SWIRE 0xb2
+#define MCS_PENTILE_1 0xb3
+#define MCS_PENTILE_2 0xb4
+#define MCS_GAMMA_DELTA_Y_RED 0xb5
+#define MCS_GAMMA_DELTA_X_RED 0xb6
+#define MCS_GAMMA_DELTA_Y_GREEN 0xb7
+#define MCS_GAMMA_DELTA_X_GREEN 0xb8
+#define MCS_GAMMA_DELTA_Y_BLUE 0xb9
+#define MCS_GAMMA_DELTA_X_BLUE 0xba
#define MCS_MIECTL1 0xc0
#define MCS_BCMODE 0xc1
#define MCS_ERROR_CHECK 0xd5
@@ -281,6 +289,7 @@ struct s6e63m0 {
struct backlight_device *bl_dev;
u8 lcd_type;
u8 elvss_pulse;
+ bool dsi_mode;
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset_gpio;
@@ -395,9 +404,21 @@ static int s6e63m0_check_lcd_type(struct s6e63m0 *ctx)
static void s6e63m0_init(struct s6e63m0 *ctx)
{
- s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
- 0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
- 0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00);
+ /*
+ * We do not know why there is a difference in the DSI mode.
+ * (No datasheet.)
+ *
+ * In the vendor driver this sequence is called
+ * "SEQ_PANEL_CONDITION_SET" or "DCS_CMD_SEQ_PANEL_COND_SET".
+ */
+ if (ctx->dsi_mode)
+ s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
+ 0x01, 0x2c, 0x2c, 0x07, 0x07, 0x5f, 0xb3,
+ 0x6d, 0x97, 0x1d, 0x3a, 0x0f, 0x00, 0x00);
+ else
+ s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
+ 0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
+ 0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00);
s6e63m0_dcs_write_seq_static(ctx, MCS_DISCTL,
0x02, 0x03, 0x1c, 0x10, 0x10);
@@ -414,40 +435,40 @@ static void s6e63m0_init(struct s6e63m0 *ctx)
s6e63m0_dcs_write_seq_static(ctx, MCS_SRCCTL,
0x00, 0x8e, 0x07);
- s6e63m0_dcs_write_seq_static(ctx, 0xb3, 0x6c);
+ s6e63m0_dcs_write_seq_static(ctx, MCS_PENTILE_1, 0x6c);
- s6e63m0_dcs_write_seq_static(ctx, 0xb5,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_RED,
0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
0x21, 0x20, 0x1e, 0x1e);
- s6e63m0_dcs_write_seq_static(ctx, 0xb6,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_RED,
0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
0x66, 0x66);
- s6e63m0_dcs_write_seq_static(ctx, 0xb7,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_GREEN,
0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
0x21, 0x20, 0x1e, 0x1e);
- s6e63m0_dcs_write_seq_static(ctx, 0xb8,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_GREEN,
0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
0x66, 0x66);
- s6e63m0_dcs_write_seq_static(ctx, 0xb9,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_BLUE,
0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
0x21, 0x20, 0x1e, 0x1e);
- s6e63m0_dcs_write_seq_static(ctx, 0xba,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_BLUE,
0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
0x66, 0x66);
@@ -671,12 +692,12 @@ static const struct backlight_ops s6e63m0_backlight_ops = {
.update_status = s6e63m0_set_brightness,
};
-static int s6e63m0_backlight_register(struct s6e63m0 *ctx)
+static int s6e63m0_backlight_register(struct s6e63m0 *ctx, u32 max_brightness)
{
struct backlight_properties props = {
.type = BACKLIGHT_RAW,
- .brightness = MAX_BRIGHTNESS,
- .max_brightness = MAX_BRIGHTNESS
+ .brightness = max_brightness,
+ .max_brightness = max_brightness,
};
struct device *dev = ctx->dev;
int ret = 0;
@@ -698,12 +719,14 @@ int s6e63m0_probe(struct device *dev,
bool dsi_mode)
{
struct s6e63m0 *ctx;
+ u32 max_brightness;
int ret;
ctx = devm_kzalloc(dev, sizeof(struct s6e63m0), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ ctx->dsi_mode = dsi_mode;
ctx->dcs_read = dcs_read;
ctx->dcs_write = dcs_write;
dev_set_drvdata(dev, ctx);
@@ -712,6 +735,14 @@ int s6e63m0_probe(struct device *dev,
ctx->enabled = false;
ctx->prepared = false;
+ ret = device_property_read_u32(dev, "max-brightness", &max_brightness);
+ if (ret)
+ max_brightness = MAX_BRIGHTNESS;
+ if (max_brightness > MAX_BRIGHTNESS) {
+ dev_err(dev, "illegal max brightness specified\n");
+ max_brightness = MAX_BRIGHTNESS;
+ }
+
ctx->supplies[0].supply = "vdd3";
ctx->supplies[1].supply = "vci";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
@@ -731,7 +762,7 @@ int s6e63m0_probe(struct device *dev,
dsi_mode ? DRM_MODE_CONNECTOR_DSI :
DRM_MODE_CONNECTOR_DPI);
- ret = s6e63m0_backlight_register(ctx);
+ ret = s6e63m0_backlight_register(ctx, max_brightness);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 41bbec72b2da..4e2dad314c79 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -39,72 +39,145 @@
#include <drm/drm_panel.h>
/**
- * struct panel_desc
- * @modes: Pointer to array of fixed modes appropriate for this panel. If
- * only one mode then this can just be the address of this the mode.
- * NOTE: cannot be used with "timings" and also if this is specified
- * then you cannot override the mode in the device tree.
- * @num_modes: Number of elements in modes array.
- * @timings: Pointer to array of display timings. NOTE: cannot be used with
- * "modes" and also these will be used to validate a device tree
- * override if one is present.
- * @num_timings: Number of elements in timings array.
- * @bpc: Bits per color.
- * @size: Structure containing the physical size of this panel.
- * @delay: Structure containing various delay values for this panel.
- * @bus_format: See MEDIA_BUS_FMT_... defines.
- * @bus_flags: See DRM_BUS_FLAG_... defines.
- * @connector_type: LVDS, eDP, DSI, DPI, etc.
+ * struct panel_desc - Describes a simple panel.
*/
struct panel_desc {
+ /**
+ * @modes: Pointer to array of fixed modes appropriate for this panel.
+ *
+ * If only one mode then this can just be the address of the mode.
+ * NOTE: cannot be used with "timings" and also if this is specified
+ * then you cannot override the mode in the device tree.
+ */
const struct drm_display_mode *modes;
+
+ /** @num_modes: Number of elements in modes array. */
unsigned int num_modes;
+
+ /**
+ * @timings: Pointer to array of display timings
+ *
+ * NOTE: cannot be used with "modes" and also these will be used to
+ * validate a device tree override if one is present.
+ */
const struct display_timing *timings;
+
+ /** @num_timings: Number of elements in timings array. */
unsigned int num_timings;
+ /** @bpc: Bits per color. */
unsigned int bpc;
- /**
- * @width: width (in millimeters) of the panel's active display area
- * @height: height (in millimeters) of the panel's active display area
- */
+ /** @size: Structure containing the physical size of this panel. */
struct {
+ /**
+ * @size.width: Width (in mm) of the active display area.
+ */
unsigned int width;
+
+ /**
+ * @size.height: Height (in mm) of the active display area.
+ */
unsigned int height;
} size;
- /**
- * @prepare: the time (in milliseconds) that it takes for the panel to
- * become ready and start receiving video data
- * @hpd_absent_delay: Add this to the prepare delay if we know Hot
- * Plug Detect isn't used.
- * @enable: the time (in milliseconds) that it takes for the panel to
- * display the first valid frame after starting to receive
- * video data
- * @disable: the time (in milliseconds) that it takes for the panel to
- * turn the display off (no content is visible)
- * @unprepare: the time (in milliseconds) that it takes for the panel
- * to power itself down completely
- */
+ /** @delay: Structure containing various delay values for this panel. */
struct {
+ /**
+ * @delay.prepare: Time for the panel to become ready.
+ *
+ * The time (in milliseconds) that it takes for the panel to
+ * become ready and start receiving video data
+ */
unsigned int prepare;
+
+ /**
+ * @delay.hpd_absent_delay: Time to wait if HPD isn't hooked up.
+ *
+ * Add this to the prepare delay if we know Hot Plug Detect
+ * isn't used.
+ */
unsigned int hpd_absent_delay;
+
+ /**
+ * @delay.prepare_to_enable: Time between prepare and enable.
+ *
+ * The minimum time, in milliseconds, that needs to have passed
+ * between when prepare finished and enable may begin. If at
+ * enable time less time has passed since prepare finished,
+ * the driver waits for the remaining time.
+ *
+ * If a fixed enable delay is also specified, we'll start
+ * counting before delaying for the fixed delay.
+ *
+ * If a fixed prepare delay is also specified, we won't start
+ * counting until after the fixed delay. We can't overlap this
+ * fixed delay with the min time because the fixed delay
+ * doesn't happen at the end of the function if a HPD GPIO was
+ * specified.
+ *
+ * In other words:
+ * prepare()
+ * ...
+ * // do fixed prepare delay
+ * // wait for HPD GPIO if applicable
+ * // start counting for prepare_to_enable
+ *
+ * enable()
+ * // do fixed enable delay
+ * // enforce prepare_to_enable min time
+ */
+ unsigned int prepare_to_enable;
+
+ /**
+ * @delay.enable: Time for the panel to display a valid frame.
+ *
+ * The time (in milliseconds) that it takes for the panel to
+ * display the first valid frame after starting to receive
+ * video data.
+ */
unsigned int enable;
+
+ /**
+ * @delay.disable: Time for the panel to turn the display off.
+ *
+ * The time (in milliseconds) that it takes for the panel to
+ * turn the display off (no content is visible).
+ */
unsigned int disable;
+
+ /**
+ * @delay.unprepare: Time to power down completely.
+ *
+ * The time (in milliseconds) that it takes for the panel
+ * to power itself down completely.
+ *
+ * This time is used to prevent a future "prepare" from
+ * starting until at least this many milliseconds has passed.
+ * If at prepare time less time has passed since unprepare
+ * finished, the driver waits for the remaining time.
+ */
unsigned int unprepare;
} delay;
+ /** @bus_format: See MEDIA_BUS_FMT_... defines. */
u32 bus_format;
+
+ /** @bus_flags: See DRM_BUS_FLAG_... defines. */
u32 bus_flags;
+
+ /** @connector_type: LVDS, eDP, DSI, DPI, etc. */
int connector_type;
};
struct panel_simple {
struct drm_panel base;
- bool prepared;
bool enabled;
bool no_hpd;
+ ktime_t prepared_time;
+ ktime_t unprepared_time;
+
const struct panel_desc *desc;
struct regulator *supply;
@@ -232,6 +305,20 @@ static int panel_simple_get_non_edid_modes(struct panel_simple *panel,
return num;
}
+static void panel_simple_wait(ktime_t start_ktime, unsigned int min_ms)
+{
+ ktime_t now_ktime, min_ktime;
+
+ if (!min_ms)
+ return;
+
+ min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
+ now_ktime = ktime_get();
+
+ if (ktime_before(now_ktime, min_ktime))
+ msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
+}
+
static int panel_simple_disable(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
@@ -251,17 +338,15 @@ static int panel_simple_unprepare(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
- if (!p->prepared)
+ if (p->prepared_time == 0)
return 0;
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
- if (p->desc->delay.unprepare)
- msleep(p->desc->delay.unprepare);
-
- p->prepared = false;
+ p->prepared_time = 0;
+ p->unprepared_time = ktime_get();
return 0;
}
@@ -298,9 +383,11 @@ static int panel_simple_prepare(struct drm_panel *panel)
int err;
int hpd_asserted;
- if (p->prepared)
+ if (p->prepared_time != 0)
return 0;
+ panel_simple_wait(p->unprepared_time, p->desc->delay.unprepare);
+
err = regulator_enable(p->supply);
if (err < 0) {
dev_err(panel->dev, "failed to enable supply: %d\n", err);
@@ -335,7 +422,7 @@ static int panel_simple_prepare(struct drm_panel *panel)
}
}
- p->prepared = true;
+ p->prepared_time = ktime_get();
return 0;
}
@@ -350,6 +437,8 @@ static int panel_simple_enable(struct drm_panel *panel)
if (p->desc->delay.enable)
msleep(p->desc->delay.enable);
+ panel_simple_wait(p->prepared_time, p->desc->delay.prepare_to_enable);
+
p->enabled = true;
return 0;
@@ -516,7 +605,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return -ENOMEM;
panel->enabled = false;
- panel->prepared = false;
+ panel->prepared_time = 0;
panel->desc = desc;
panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
@@ -1318,6 +1407,51 @@ static const struct panel_desc boe_nv101wxmn51 = {
},
};
+static const struct drm_display_mode boe_nv110wtm_n61_modes[] = {
+ {
+ .clock = 207800,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 100,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 6,
+ .vtotal = 1440 + 3 + 6 + 31,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+ {
+ .clock = 138500,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 100,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 6,
+ .vtotal = 1440 + 3 + 6 + 31,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
+static const struct panel_desc boe_nv110wtm_n61 = {
+ .modes = boe_nv110wtm_n61_modes,
+ .num_modes = ARRAY_SIZE(boe_nv110wtm_n61_modes),
+ .bpc = 8,
+ .size = {
+ .width = 233,
+ .height = 155,
+ },
+ .delay = {
+ .hpd_absent_delay = 200,
+ .prepare_to_enable = 80,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
/* Also used for boe_nv133fhm_n62 */
static const struct drm_display_mode boe_nv133fhm_n61_modes = {
.clock = 147840,
@@ -2265,6 +2399,8 @@ static const struct panel_desc innolux_n116bge = {
.width = 256,
.height = 144,
},
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
};
static const struct drm_display_mode innolux_n125hce_gn1_mode = {
@@ -4034,6 +4170,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "boe,nv101wxmn51",
.data = &boe_nv101wxmn51,
}, {
+ .compatible = "boe,nv110wtm-n61",
+ .data = &boe_nv110wtm_n61,
+ }, {
.compatible = "boe,nv133fhm-n61",
.data = &boe_nv133fhm_n61,
}, {
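
The panel-simple change above replaces fixed unprepare/prepare sleeps with timestamps (prepared_time/unprepared_time), so the driver only waits for whatever part of the required minimum has not already elapsed by the time the next transition happens. A compact userspace sketch of the same idea, with clock_gettime() standing in for ktime_get() and usleep() for msleep() (names and delays illustrative):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static struct timespec unprepared_time;

static long elapsed_ms(const struct timespec *since)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - since->tv_sec) * 1000 +
	       (now.tv_nsec - since->tv_nsec) / 1000000;
}

/* Sleep only for the part of min_ms that has not yet passed since *start,
 * mirroring panel_simple_wait() above. */
static void wait_min_time(const struct timespec *start, long min_ms)
{
	long passed = elapsed_ms(start);

	if (passed < min_ms)
		usleep((min_ms - passed) * 1000);
}

static void panel_unprepare(void)
{
	/* ...power the panel down... */
	clock_gettime(CLOCK_MONOTONIC, &unprepared_time);
}

static void panel_prepare(long min_unprepare_ms)
{
	/* enforce the minimum off time before powering back up */
	wait_min_time(&unprepared_time, min_unprepare_ms);
	/* ...power the panel up... */
}

int main(void)
{
	panel_unprepare();
	panel_prepare(500);	/* e.g. the 500 ms unprepare delay above */
	printf("re-prepared after the minimum off time\n");
	return 0;
}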
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index b30510b1696a..a2c303e5732c 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -530,10 +530,8 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
return -ENOMEM;
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(ctx->reset_gpio)) {
- dev_err(dev, "cannot get reset gpio\n");
- return PTR_ERR(ctx->reset_gpio);
- }
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), "Failed to get reset gpio\n");
mipi_dsi_set_drvdata(dsi, ctx);
@@ -545,19 +543,13 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
dsi->lanes = ctx->desc->lanes;
ctx->vcc = devm_regulator_get(dev, "vcc");
- if (IS_ERR(ctx->vcc)) {
- ret = PTR_ERR(ctx->vcc);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request vcc regulator: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ctx->vcc))
+ return dev_err_probe(dev, PTR_ERR(ctx->vcc), "Failed to request vcc regulator\n");
+
ctx->iovcc = devm_regulator_get(dev, "iovcc");
- if (IS_ERR(ctx->iovcc)) {
- ret = PTR_ERR(ctx->iovcc);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request iovcc regulator: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ctx->iovcc))
+ return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
+ "Failed to request iovcc regulator\n");
drm_panel_init(&ctx->panel, dev, &st7703_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index f44d28fad085..56b3f5935703 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -76,6 +76,7 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
}
static struct devfreq_dev_profile panfrost_devfreq_profile = {
+ .timer = DEVFREQ_TIMER_DELAYED,
.polling_ms = 50, /* ~3 frames */
.target = panfrost_devfreq_target,
.get_dev_status = panfrost_devfreq_get_dev_status,
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 40e6708fbbe2..e4dcaef6c143 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -228,7 +228,7 @@ static const struct drm_driver pl111_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = pl111_gem_import_sg_table,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = pl111_debugfs_init,
diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h
index a7bc31f6d565..06caa61b5d66 100644
--- a/drivers/gpu/drm/qxl/qxl_dev.h
+++ b/drivers/gpu/drm/qxl/qxl_dev.h
@@ -271,7 +271,7 @@ struct qxl_mode {
/* qxl-1 compat: fixed */
struct qxl_modes {
uint32_t n_modes;
- struct qxl_mode modes[0];
+ struct qxl_mode modes[];
};
/* qxl-1 compat: append only */
@@ -382,12 +382,12 @@ struct qxl_data_chunk {
uint32_t data_size;
QXLPHYSICAL prev_chunk;
QXLPHYSICAL next_chunk;
- uint8_t data[0];
+ uint8_t data[];
};
struct qxl_message {
union qxl_release_info release_info;
- uint8_t data[0];
+ uint8_t data[];
};
struct qxl_compat_update_cmd {
@@ -469,7 +469,7 @@ struct qxl_raster_glyph {
struct qxl_point glyph_origin;
uint16_t width;
uint16_t height;
- uint8_t data[0];
+ uint8_t data[];
};
struct qxl_string {
@@ -768,7 +768,7 @@ enum {
struct qxl_path_seg {
uint32_t flags;
uint32_t count;
- struct qxl_point_fix points[0];
+ struct qxl_point_fix points[];
};
struct qxl_path {
@@ -819,7 +819,7 @@ struct qxl_image_descriptor {
struct qxl_palette {
uint64_t unique;
uint16_t num_ents;
- uint32_t ents[0];
+ uint32_t ents[];
};
struct qxl_bitmap {
@@ -838,7 +838,7 @@ struct qxl_surface_id {
struct qxl_encoder_data {
uint32_t data_size;
- uint8_t data[0];
+ uint8_t data[];
};
struct qxl_image {
@@ -868,7 +868,7 @@ struct qxl_monitors_config {
uint16_t count;
uint16_t max_allowed; /* If it is 0 no fixed limit is given by the
driver */
- struct qxl_head heads[0];
+ struct qxl_head heads[];
};
#pragma pack(pop)
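
The qxl_dev.h hunk above converts zero-length trailing arrays ([0]) to C99 flexible array members ([]), which compilers and bounds checkers reason about correctly. A small standalone example of allocating and filling such a member; the palette structure here is a simplified illustration, not the real qxl_palette.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Flexible array member: the header is followed, in the same allocation,
 * by num_ents entries. */
struct palette {
	uint64_t unique;
	uint16_t num_ents;
	uint32_t ents[];
};

int main(void)
{
	uint16_t i, n = 4;
	struct palette *p = malloc(sizeof(*p) + n * sizeof(p->ents[0]));

	if (!p)
		return 1;

	p->unique = 1;
	p->num_ents = n;
	for (i = 0; i < n; i++)
		p->ents[i] = 0xff000000u | i;

	printf("palette with %u entries, first 0x%08x\n",
	       (unsigned int)p->num_ents, (unsigned int)p->ents[0]);
	free(p);
	return 0;
}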
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 6e7f16f4cec7..fb5f6a5e81d7 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -163,7 +163,7 @@ DEFINE_DRM_GEM_FOPS(qxl_fops);
static int qxl_drm_freeze(struct drm_device *dev)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct qxl_device *qdev = to_qxl(dev);
int ret;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 8bd0f916dfbc..83b54f0dad61 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -46,7 +46,6 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>
#include "qxl_dev.h"
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 16e1e589508e..b6075f452b9e 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -370,13 +370,14 @@ static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct qxl_device *qdev = to_qxl(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_qxl_clientcap *param = data;
int byte, idx;
byte = param->index / 8;
idx = param->index % 8;
- if (dev->pdev->revision < 4)
+ if (pdev->revision < 4)
return -ENOSYS;
if (byte >= 58)
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 1ba5a702d763..ddf6588a2a38 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -81,6 +81,7 @@ static void qxl_client_monitors_config_work_func(struct work_struct *work)
int qxl_irq_init(struct qxl_device *qdev)
{
+ struct pci_dev *pdev = to_pci_dev(qdev->ddev.dev);
int ret;
init_waitqueue_head(&qdev->display_event);
@@ -93,7 +94,7 @@ int qxl_irq_init(struct qxl_device *qdev)
atomic_set(&qdev->irq_received_cursor, 0);
atomic_set(&qdev->irq_received_io_cmd, 0);
qdev->irq_received_error = 0;
- ret = drm_irq_install(&qdev->ddev, qdev->ddev.pdev->irq);
+ ret = drm_irq_install(&qdev->ddev, pdev->irq);
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
if (unlikely(ret != 0)) {
DRM_ERROR("Failed installing irq: %d\n", ret);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 228e2b9198f1..4a60a52ab62e 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -111,7 +111,6 @@ int qxl_device_init(struct qxl_device *qdev,
{
int r, sb;
- qdev->ddev.pdev = pdev;
pci_set_drvdata(pdev, &qdev->ddev);
mutex_init(&qdev->gem.mutex);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index ebf24c9d2bf2..e60a8f88e226 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -50,7 +50,7 @@ static inline void qxl_bo_unreserve(struct qxl_bo *bo)
static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
{
- return bo->tbo.num_pages << PAGE_SHIFT;
+ return bo->tbo.base.size;
}
static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index e75e364655b8..0fcfc952d5e9 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -456,7 +456,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
bo = entry->bo;
dma_resv_add_shared_fence(bo->base.resv, &release->base);
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&ttm_bo_glob.lru_lock);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 7dd0c69baa47..33c09dc94f8b 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -31,7 +31,6 @@
#include <drm/qxl_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>
#include "qxl_drv.h"
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index 6ac71755c22d..cdeb1db87222 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file r128_ioc32.c
*
* 32-bit ioctl compatibility routines for the R128 DRM.
@@ -170,13 +170,13 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
};
/**
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/dri/card<n>.
+ * r128_compat_ioctl - Called whenever a 32-bit process running under
+ * a 64-bit kernel performs an ioctl on /dev/dri/card<n>.
*
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
+ * @filp: file pointer.
+ * @cmd: command.
+ * @arg: user argument.
+ * Return: zero on success or negative number on failure.
*/
long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 683de198e18d..0fce73b9a646 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2062,9 +2062,9 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
/* Funky macbooks */
- if ((dev->pdev->device == 0x71C5) &&
- (dev->pdev->subsystem_vendor == 0x106b) &&
- (dev->pdev->subsystem_device == 0x0080)) {
+ if ((rdev->pdev->device == 0x71C5) &&
+ (rdev->pdev->subsystem_vendor == 0x106b) &&
+ (rdev->pdev->subsystem_device == 0x0080)) {
if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index aef4efc692b1..2955bb32d5ad 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2612,7 +2612,6 @@ int r100_asic_reset(struct radeon_device *rdev, bool hard)
void r100_set_common_regs(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
bool force_dac2 = false;
u32 tmp;
@@ -2630,7 +2629,7 @@ void r100_set_common_regs(struct radeon_device *rdev)
* don't report it in the bios connector
* table.
*/
- switch (dev->pdev->device) {
+ switch (rdev->pdev->device) {
/* RN50 */
case 0x515e:
case 0x5969:
@@ -2640,17 +2639,17 @@ void r100_set_common_regs(struct radeon_device *rdev)
case 0x5159:
case 0x515a:
/* DELL triple head servers */
- if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
- ((dev->pdev->subsystem_device == 0x016c) ||
- (dev->pdev->subsystem_device == 0x016d) ||
- (dev->pdev->subsystem_device == 0x016e) ||
- (dev->pdev->subsystem_device == 0x016f) ||
- (dev->pdev->subsystem_device == 0x0170) ||
- (dev->pdev->subsystem_device == 0x017d) ||
- (dev->pdev->subsystem_device == 0x017e) ||
- (dev->pdev->subsystem_device == 0x0183) ||
- (dev->pdev->subsystem_device == 0x018a) ||
- (dev->pdev->subsystem_device == 0x019a)))
+ if ((rdev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
+ ((rdev->pdev->subsystem_device == 0x016c) ||
+ (rdev->pdev->subsystem_device == 0x016d) ||
+ (rdev->pdev->subsystem_device == 0x016e) ||
+ (rdev->pdev->subsystem_device == 0x016f) ||
+ (rdev->pdev->subsystem_device == 0x0170) ||
+ (rdev->pdev->subsystem_device == 0x017d) ||
+ (rdev->pdev->subsystem_device == 0x017e) ||
+ (rdev->pdev->subsystem_device == 0x0183) ||
+ (rdev->pdev->subsystem_device == 0x018a) ||
+ (rdev->pdev->subsystem_device == 0x019a)))
force_dac2 = true;
break;
}
@@ -2798,7 +2797,7 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
rdev->mc.real_vram_size = 8192 * 1024;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
}
- /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
+ /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
* Novell bug 204882 + along with lots of ubuntu ones
*/
if (rdev->mc.aper_size > config_aper_size)
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index dc68e538d5a9..34b7c6f16479 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -220,7 +220,7 @@ int r600_fmt_get_nblocksx(u32 format, u32 w)
if (bw == 0)
return 0;
- return (w + bw - 1) / bw;
+ return DIV_ROUND_UP(w, bw);
}
int r600_fmt_get_nblocksy(u32 format, u32 h)
@@ -234,7 +234,7 @@ int r600_fmt_get_nblocksy(u32 format, u32 h)
if (bh == 0)
return 0;
- return (h + bh - 1) / bh;
+ return DIV_ROUND_UP(h, bh);
}
struct array_mode_checker {
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5f3adba43e47..f09989bdce98 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -75,7 +75,6 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/drm_gem.h>
@@ -2314,6 +2313,9 @@ struct radeon_device {
struct device *dev;
struct drm_device *ddev;
struct pci_dev *pdev;
+#ifdef __alpha__
+ struct pci_controller *hose;
+#endif
struct rw_semaphore exclusive_lock;
/* ASIC */
union radeon_asic_config config;
@@ -2623,14 +2625,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
(rdev->family == CHIP_RV410) || \
(rdev->family == CHIP_RS400) || \
(rdev->family == CHIP_RS480))
-#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
- (rdev->ddev->pdev->device == 0x9443) || \
- (rdev->ddev->pdev->device == 0x944B) || \
- (rdev->ddev->pdev->device == 0x9506) || \
- (rdev->ddev->pdev->device == 0x9509) || \
- (rdev->ddev->pdev->device == 0x950F) || \
- (rdev->ddev->pdev->device == 0x689C) || \
- (rdev->ddev->pdev->device == 0x689D))
+#define ASIC_IS_X2(rdev) ((rdev->pdev->device == 0x9441) || \
+ (rdev->pdev->device == 0x9443) || \
+ (rdev->pdev->device == 0x944B) || \
+ (rdev->pdev->device == 0x9506) || \
+ (rdev->pdev->device == 0x9509) || \
+ (rdev->pdev->device == 0x950F) || \
+ (rdev->pdev->device == 0x689C) || \
+ (rdev->pdev->device == 0x689D))
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
(rdev->family == CHIP_RS690) || \
@@ -2653,14 +2655,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
(rdev->family == CHIP_MULLINS))
-#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
- (rdev->ddev->pdev->device == 0x6850) || \
- (rdev->ddev->pdev->device == 0x6858) || \
- (rdev->ddev->pdev->device == 0x6859) || \
- (rdev->ddev->pdev->device == 0x6840) || \
- (rdev->ddev->pdev->device == 0x6841) || \
- (rdev->ddev->pdev->device == 0x6842) || \
- (rdev->ddev->pdev->device == 0x6843))
+#define ASIC_IS_LOMBOK(rdev) ((rdev->pdev->device == 0x6849) || \
+ (rdev->pdev->device == 0x6850) || \
+ (rdev->pdev->device == 0x6858) || \
+ (rdev->pdev->device == 0x6859) || \
+ (rdev->pdev->device == 0x6840) || \
+ (rdev->pdev->device == 0x6841) || \
+ (rdev->pdev->device == 0x6842) || \
+ (rdev->pdev->device == 0x6843))
/*
* BIOS helpers.
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index be96d9b64e43..42301b4e56f5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -284,46 +284,47 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
uint16_t *line_mux,
struct radeon_hpd *hpd)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
- if ((dev->pdev->device == 0x791e) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0x826d)) {
+ if ((pdev->device == 0x791e) &&
+ (pdev->subsystem_vendor == 0x1043) &&
+ (pdev->subsystem_device == 0x826d)) {
if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
(supported_device == ATOM_DEVICE_DFP3_SUPPORT))
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
/* Asrock RS600 board lists the DVI port as HDMI */
- if ((dev->pdev->device == 0x7941) &&
- (dev->pdev->subsystem_vendor == 0x1849) &&
- (dev->pdev->subsystem_device == 0x7941)) {
+ if ((pdev->device == 0x7941) &&
+ (pdev->subsystem_vendor == 0x1849) &&
+ (pdev->subsystem_device == 0x7941)) {
if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
(supported_device == ATOM_DEVICE_DFP3_SUPPORT))
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
/* MSI K9A2GM V2/V3 board has no HDMI or DVI */
- if ((dev->pdev->device == 0x796e) &&
- (dev->pdev->subsystem_vendor == 0x1462) &&
- (dev->pdev->subsystem_device == 0x7302)) {
+ if ((pdev->device == 0x796e) &&
+ (pdev->subsystem_vendor == 0x1462) &&
+ (pdev->subsystem_device == 0x7302)) {
if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
(supported_device == ATOM_DEVICE_DFP3_SUPPORT))
return false;
}
/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
- if ((dev->pdev->device == 0x7941) &&
- (dev->pdev->subsystem_vendor == 0x147b) &&
- (dev->pdev->subsystem_device == 0x2412)) {
+ if ((pdev->device == 0x7941) &&
+ (pdev->subsystem_vendor == 0x147b) &&
+ (pdev->subsystem_device == 0x2412)) {
if (*connector_type == DRM_MODE_CONNECTOR_DVII)
return false;
}
/* Falcon NW laptop lists vga ddc line for LVDS */
- if ((dev->pdev->device == 0x5653) &&
- (dev->pdev->subsystem_vendor == 0x1462) &&
- (dev->pdev->subsystem_device == 0x0291)) {
+ if ((pdev->device == 0x5653) &&
+ (pdev->subsystem_vendor == 0x1462) &&
+ (pdev->subsystem_device == 0x0291)) {
if (*connector_type == DRM_MODE_CONNECTOR_LVDS) {
i2c_bus->valid = false;
*line_mux = 53;
@@ -331,26 +332,26 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* HIS X1300 is DVI+VGA, not DVI+DVI */
- if ((dev->pdev->device == 0x7146) &&
- (dev->pdev->subsystem_vendor == 0x17af) &&
- (dev->pdev->subsystem_device == 0x2058)) {
+ if ((pdev->device == 0x7146) &&
+ (pdev->subsystem_vendor == 0x17af) &&
+ (pdev->subsystem_device == 0x2058)) {
if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
return false;
}
/* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
- if ((dev->pdev->device == 0x7142) &&
- (dev->pdev->subsystem_vendor == 0x1458) &&
- (dev->pdev->subsystem_device == 0x2134)) {
+ if ((pdev->device == 0x7142) &&
+ (pdev->subsystem_vendor == 0x1458) &&
+ (pdev->subsystem_device == 0x2134)) {
if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
return false;
}
/* Funky macbooks */
- if ((dev->pdev->device == 0x71C5) &&
- (dev->pdev->subsystem_vendor == 0x106b) &&
- (dev->pdev->subsystem_device == 0x0080)) {
+ if ((pdev->device == 0x71C5) &&
+ (pdev->subsystem_vendor == 0x106b) &&
+ (pdev->subsystem_device == 0x0080)) {
if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
(supported_device == ATOM_DEVICE_DFP2_SUPPORT))
return false;
@@ -366,27 +367,27 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* ASUS HD 3600 XT board lists the DVI port as HDMI */
- if ((dev->pdev->device == 0x9598) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0x01da)) {
+ if ((pdev->device == 0x9598) &&
+ (pdev->subsystem_vendor == 0x1043) &&
+ (pdev->subsystem_device == 0x01da)) {
if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
}
}
/* ASUS HD 3600 board lists the DVI port as HDMI */
- if ((dev->pdev->device == 0x9598) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0x01e4)) {
+ if ((pdev->device == 0x9598) &&
+ (pdev->subsystem_vendor == 0x1043) &&
+ (pdev->subsystem_device == 0x01e4)) {
if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
}
}
/* ASUS HD 3450 board lists the DVI port as HDMI */
- if ((dev->pdev->device == 0x95C5) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0x01e2)) {
+ if ((pdev->device == 0x95C5) &&
+ (pdev->subsystem_vendor == 0x1043) &&
+ (pdev->subsystem_device == 0x01e2)) {
if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
}
@@ -411,9 +412,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
* with different crtcs which isn't possible on the hardware
* side and leaves no crtcs for LVDS or VGA.
*/
- if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) &&
- (dev->pdev->subsystem_vendor == 0x1025) &&
- (dev->pdev->subsystem_device == 0x013c)) {
+ if (((pdev->device == 0x95c4) || (pdev->device == 0x9591)) &&
+ (pdev->subsystem_vendor == 0x1025) &&
+ (pdev->subsystem_device == 0x013c)) {
if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
(supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
/* actually it's a DVI-D port not DVI-I */
@@ -425,9 +426,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
/* XFX Pine Group device rv730 reports no VGA DDC lines
* even though they are wired up to record 0x93
*/
- if ((dev->pdev->device == 0x9498) &&
- (dev->pdev->subsystem_vendor == 0x1682) &&
- (dev->pdev->subsystem_device == 0x2452) &&
+ if ((pdev->device == 0x9498) &&
+ (pdev->subsystem_vendor == 0x1682) &&
+ (pdev->subsystem_device == 0x2452) &&
(i2c_bus->valid == false) &&
!(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
struct radeon_device *rdev = dev->dev_private;
@@ -435,11 +436,11 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
- if (((dev->pdev->device == 0x9802) ||
- (dev->pdev->device == 0x9805) ||
- (dev->pdev->device == 0x9806)) &&
- (dev->pdev->subsystem_vendor == 0x1734) &&
- (dev->pdev->subsystem_device == 0x11bd)) {
+ if (((pdev->device == 0x9802) ||
+ (pdev->device == 0x9805) ||
+ (pdev->device == 0x9806)) &&
+ (pdev->subsystem_vendor == 0x1734) &&
+ (pdev->subsystem_device == 0x11bd)) {
if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
*line_mux = 0x3103;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index bb29cf02974d..500796dc5d74 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -528,7 +528,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
fp2_gen_cntl = 0;
- if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
}
@@ -565,7 +565,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
(RADEON_CRTC_SYNC_TRISTAT |
RADEON_CRTC_DISPLAY_DIS)));
- if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
}
@@ -583,7 +583,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
}
WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
- if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
}
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index ff2135059c07..783a6b8802d5 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -894,13 +894,13 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
/* quirks */
/* Radeon 7000 (RV100) */
- if (((dev->pdev->device == 0x5159) &&
- (dev->pdev->subsystem_vendor == 0x174B) &&
- (dev->pdev->subsystem_device == 0x7c28)) ||
+ if (((rdev->pdev->device == 0x5159) &&
+ (rdev->pdev->subsystem_vendor == 0x174B) &&
+ (rdev->pdev->subsystem_device == 0x7c28)) ||
/* Radeon 9100 (R200) */
- ((dev->pdev->device == 0x514D) &&
- (dev->pdev->subsystem_vendor == 0x174B) &&
- (dev->pdev->subsystem_device == 0x7149))) {
+ ((rdev->pdev->device == 0x514D) &&
+ (rdev->pdev->subsystem_vendor == 0x174B) &&
+ (rdev->pdev->subsystem_device == 0x7149))) {
/* vbios value is bad, use the default */
found = 0;
}
@@ -2221,20 +2221,21 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
struct radeon_i2c_bus_rec *ddc_i2c,
struct radeon_hpd *hpd)
{
+ struct radeon_device *rdev = dev->dev_private;
/* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
- if (dev->pdev->device == 0x515e &&
- dev->pdev->subsystem_vendor == 0x1014) {
+ if (rdev->pdev->device == 0x515e &&
+ rdev->pdev->subsystem_vendor == 0x1014) {
if (*legacy_connector == CONNECTOR_CRT_LEGACY &&
ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
return false;
}
/* X300 card with extra non-existent DVI port */
- if (dev->pdev->device == 0x5B60 &&
- dev->pdev->subsystem_vendor == 0x17af &&
- dev->pdev->subsystem_device == 0x201e && bios_index == 2) {
+ if (rdev->pdev->device == 0x5B60 &&
+ rdev->pdev->subsystem_vendor == 0x17af &&
+ rdev->pdev->subsystem_device == 0x201e && bios_index == 2) {
if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
return false;
}
@@ -2244,22 +2245,24 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
{
+ struct radeon_device *rdev = dev->dev_private;
+
/* Acer 5102 has non-existent TV port */
- if (dev->pdev->device == 0x5975 &&
- dev->pdev->subsystem_vendor == 0x1025 &&
- dev->pdev->subsystem_device == 0x009f)
+ if (rdev->pdev->device == 0x5975 &&
+ rdev->pdev->subsystem_vendor == 0x1025 &&
+ rdev->pdev->subsystem_device == 0x009f)
return false;
/* HP dc5750 has non-existent TV port */
- if (dev->pdev->device == 0x5974 &&
- dev->pdev->subsystem_vendor == 0x103c &&
- dev->pdev->subsystem_device == 0x280a)
+ if (rdev->pdev->device == 0x5974 &&
+ rdev->pdev->subsystem_vendor == 0x103c &&
+ rdev->pdev->subsystem_device == 0x280a)
return false;
/* MSI S270 has non-existent TV port */
- if (dev->pdev->device == 0x5955 &&
- dev->pdev->subsystem_vendor == 0x1462 &&
- dev->pdev->subsystem_device == 0x0131)
+ if (rdev->pdev->device == 0x5955 &&
+ rdev->pdev->subsystem_vendor == 0x1462 &&
+ rdev->pdev->subsystem_device == 0x0131)
return false;
return true;
@@ -2413,9 +2416,9 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
/* RV100 board with external TDMS bit mis-set.
* Actually uses internal TMDS, clear the bit.
*/
- if (dev->pdev->device == 0x5159 &&
- dev->pdev->subsystem_vendor == 0x1014 &&
- dev->pdev->subsystem_device == 0x029A) {
+ if (rdev->pdev->device == 0x5159 &&
+ rdev->pdev->subsystem_vendor == 0x1014 &&
+ rdev->pdev->subsystem_device == 0x029A) {
tmp &= ~(1 << 4);
}
if ((tmp >> 4) & 0x1) {
@@ -2707,9 +2710,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
/* boards with a thermal chip, but no overdrive table */
/* Asus 9600xt has an f75375 on the monid bus */
- if ((dev->pdev->device == 0x4152) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0xc002)) {
+ if ((rdev->pdev->device == 0x4152) &&
+ (rdev->pdev->subsystem_vendor == 0x1043) &&
+ (rdev->pdev->subsystem_device == 0xc002)) {
i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index c6262fce7440..35e937d39b51 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -130,8 +130,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
* IGP chips to avoid image corruptions
*/
if (p->ring == R600_RING_TYPE_UVD_INDEX &&
- (i <= 0 || pci_find_capability(p->rdev->ddev->pdev,
- PCI_CAP_ID_AGP) ||
+ (i <= 0 || pci_find_capability(p->rdev->pdev, PCI_CAP_ID_AGP) ||
p->rdev->family == CHIP_RS780 ||
p->rdev->family == CHIP_RS880)) {
@@ -401,7 +400,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
/* Sort A before B if A is smaller. */
- return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
+ return (int)la->robj->tbo.mem.num_pages -
+ (int)lb->robj->tbo.mem.num_pages;
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ebccaa5b2d0e..2cbf14fc6ece 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1562,6 +1562,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
bool fbcon, bool freeze)
{
struct radeon_device *rdev;
+ struct pci_dev *pdev;
struct drm_crtc *crtc;
struct drm_connector *connector;
int i, r;
@@ -1571,6 +1572,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
}
rdev = dev->dev_private;
+ pdev = to_pci_dev(dev->dev);
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -1636,14 +1638,14 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
radeon_agp_suspend(rdev);
- pci_save_state(dev->pdev);
+ pci_save_state(pdev);
if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
rdev->asic->asic_reset(rdev, true);
- pci_restore_state(dev->pdev);
+ pci_restore_state(pdev);
} else if (suspend) {
/* Shut down the device */
- pci_disable_device(dev->pdev);
- pci_set_power_state(dev->pdev, PCI_D3hot);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
}
if (fbcon) {
@@ -1665,6 +1667,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_crtc *crtc;
int r;
@@ -1675,9 +1678,9 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
console_lock();
}
if (resume) {
- pci_set_power_state(dev->pdev, PCI_D0);
- pci_restore_state(dev->pdev);
- if (pci_enable_device(dev->pdev)) {
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ if (pci_enable_device(pdev)) {
if (fbcon)
console_unlock();
return -1;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 3a6fedad002d..652af7a134bd 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1317,7 +1317,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
- dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
+ dev_err(dev->dev, "No GEM object associated to handle 0x%08X, "
"can't create framebuffer\n", mode_cmd->handles[0]);
return ERR_PTR(-ENOENT);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e45d7344ac2b..efeb115ae70e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -342,14 +342,9 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_free;
- dev->pdev = pdev;
-#ifdef __alpha__
- dev->hose = pdev->sysdata;
-#endif
-
pci_set_drvdata(pdev, dev);
- if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
+ if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
dev->agp = drm_agp_init(dev);
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index fc4212633bdf..0b206b052972 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -290,7 +290,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
- vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
+ vga_switcheroo_client_fb_set(rdev->pdev, info);
return 0;
out:
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index b6b21d2e7262..941826923247 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -651,7 +651,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
}
if (args->offset < RADEON_VA_RESERVED_SIZE) {
- dev_err(&dev->pdev->dev,
+ dev_err(dev->dev,
"offset 0x%lX is in reserved area 0x%X\n",
(unsigned long)args->offset,
RADEON_VA_RESERVED_SIZE);
@@ -665,7 +665,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
*/
invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
if ((args->flags & invalid_flags)) {
- dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
+ dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
args->flags, invalid_flags);
args->operation = RADEON_VA_RESULT_ERROR;
return -EINVAL;
@@ -676,7 +676,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
case RADEON_VA_UNMAP:
break;
default:
- dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+ dev_err(dev->dev, "unsupported operation %d\n",
args->operation);
args->operation = RADEON_VA_RESULT_ERROR;
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index e543d993f73e..314d066e68e9 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -919,7 +919,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
mutex_init(&i2c->mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index b8b7f627f0a9..84d0b1a3355f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -314,7 +314,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
rdev->irq.installed = true;
- r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
+ r = drm_irq_install(rdev->ddev, rdev->pdev->irq);
if (r) {
rdev->irq.installed = false;
flush_delayed_work(&rdev->hotplug_work);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 50cee4880bb4..2479d6ab7a36 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -76,7 +76,7 @@ void radeon_driver_unload_kms(struct drm_device *dev)
}
radeon_acpi_fini(rdev);
-
+
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
@@ -105,6 +105,7 @@ done_free:
*/
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct radeon_device *rdev;
int r, acpi_status;
@@ -114,10 +115,14 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
dev->dev_private = (void *)rdev;
+#ifdef __alpha__
+ rdev->hose = pdev->sysdata;
+#endif
+
/* update BUS flag */
- if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) {
+ if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) {
flags |= RADEON_IS_AGP;
- } else if (pci_is_pcie(dev->pdev)) {
+ } else if (pci_is_pcie(pdev)) {
flags |= RADEON_IS_PCIE;
} else {
flags |= RADEON_IS_PCI;
@@ -126,7 +131,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
if ((radeon_runtime_pm != 0) &&
radeon_has_atpx() &&
((flags & RADEON_IS_IGP) == 0) &&
- !pci_is_thunderbolt_attached(dev->pdev))
+ !pci_is_thunderbolt_attached(pdev))
flags |= RADEON_IS_PX;
/* radeon_device_init should report only fatal error
@@ -135,9 +140,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
* properly initialize the GPU MC controller and permit
* VRAM allocation
*/
- r = radeon_device_init(rdev, dev, dev->pdev, flags);
+ r = radeon_device_init(rdev, dev, pdev, flags);
if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+ dev_err(dev->dev, "Fatal error during GPU init\n");
goto out;
}
@@ -147,7 +152,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
*/
r = radeon_modeset_init(rdev);
if (r)
- dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+ dev_err(dev->dev, "Fatal error during modeset init\n");
/* Call ACPI methods: require modeset init
* but failure is not fatal
@@ -155,8 +160,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
if (!r) {
acpi_status = radeon_acpi_init(rdev);
if (acpi_status)
- dev_dbg(&dev->pdev->dev,
- "Error during ACPI methods call\n");
+ dev_dbg(dev->dev, "Error during ACPI methods call\n");
}
if (radeon_is_px(dev)) {
@@ -239,7 +243,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
- *value = dev->pdev->device;
+ *value = to_pci_dev(dev->dev)->device;
break;
case RADEON_INFO_NUM_GB_PIPES:
*value = rdev->num_gb_pipes;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index e64fd0ce6707..7fdb77d48d6a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -974,9 +974,9 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
/* XXX: these are oem specific */
if (ASIC_IS_R300(rdev)) {
- if ((dev->pdev->device == 0x4850) &&
- (dev->pdev->subsystem_vendor == 0x1028) &&
- (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */
+ if ((rdev->pdev->device == 0x4850) &&
+ (rdev->pdev->subsystem_vendor == 0x1028) &&
+ (rdev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */
fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE;
else
fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 8bc5ad1d6585..9b81786782de 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -53,20 +53,19 @@ static void radeon_update_memory_usage(struct radeon_bo *bo,
unsigned mem_type, int sign)
{
struct radeon_device *rdev = bo->rdev;
- u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
switch (mem_type) {
case TTM_PL_TT:
if (sign > 0)
- atomic64_add(size, &rdev->gtt_usage);
+ atomic64_add(bo->tbo.base.size, &rdev->gtt_usage);
else
- atomic64_sub(size, &rdev->gtt_usage);
+ atomic64_sub(bo->tbo.base.size, &rdev->gtt_usage);
break;
case TTM_PL_VRAM:
if (sign > 0)
- atomic64_add(size, &rdev->vram_usage);
+ atomic64_add(bo->tbo.base.size, &rdev->vram_usage);
else
- atomic64_sub(size, &rdev->vram_usage);
+ atomic64_sub(bo->tbo.base.size, &rdev->vram_usage);
break;
}
}
@@ -255,7 +254,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
}
return 0;
}
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
if (r) {
return r;
}
@@ -609,7 +608,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
out:
radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
bo->tbo.mem.start << PAGE_SHIFT,
- bo->tbo.num_pages << PAGE_SHIFT);
+ bo->tbo.base.size);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d606e9a935e3..9896d8231fe5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -109,12 +109,12 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
{
- return bo->tbo.num_pages << PAGE_SHIFT;
+ return bo->tbo.base.size;
}
static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
{
- return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+ return bo->tbo.base.size / RADEON_GPU_PAGE_SIZE;
}
static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index dd482edc819c..ab29eb9e8667 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -35,9 +35,9 @@
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct radeon_bo *bo = gem_to_radeon_bo(obj);
- int npages = bo->tbo.num_pages;
- return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages);
+ return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages,
+ bo->tbo.ttm->num_pages);
}
struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index c93f3ab3c4e3..1729cb9a95c5 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,
TP_fast_assign(
__entry->bo = bo;
- __entry->pages = bo->tbo.num_pages;
+ __entry->pages = bo->tbo.mem.num_pages;
),
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d59ef6e92a40..e8c66d10478f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -46,7 +46,6 @@
#include <drm/radeon_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>
#include "radeon_reg.h"
@@ -276,7 +275,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
out:
/* update statistics */
- atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
+ atomic64_add(bo->base.size, &rdev->num_bytes_moved);
radeon_bo_move_notify(bo, evict, new_mem);
return 0;
}
@@ -325,7 +324,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
* access, as done in ttm_bo_vm_fault().
*/
mem->bus.offset = (mem->bus.offset & 0x0ffffffffUL) +
- rdev->ddev->hose->dense_mem_base;
+ rdev->hose->dense_mem_base;
#endif
break;
default:
@@ -396,8 +395,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
if (r)
goto release_sg;
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address, ttm->num_pages);
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
return 0;
@@ -535,7 +534,7 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
else
caching = ttm_cached;
- if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags, caching)) {
+ if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
kfree(gtt);
return NULL;
}
@@ -573,8 +572,8 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
}
if (slave && ttm->sg) {
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address, ttm->num_pages);
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
return 0;
}
@@ -730,9 +729,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
rdev->mman.initialized = true;
- ttm_pool_init(&rdev->mman.bdev.pool, rdev->dev, rdev->need_swiotlb,
- dma_addressing_limited(&rdev->pdev->dev));
-
r = radeon_ttm_init_vram(rdev);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 39c1c339be7b..dfa9fdbe98da 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -781,7 +781,7 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
RADEON_GPU_PAGE_SIZE;
- uint32_t *msg = rdev->uvd.cpu_addr + offs;
+ uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs);
uint64_t addr = rdev->uvd.gpu_addr + offs;
int r, i;
@@ -791,19 +791,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
return r;
/* stitch together an UVD create msg */
- msg[0] = cpu_to_le32(0x00000de4);
- msg[1] = cpu_to_le32(0x00000000);
- msg[2] = cpu_to_le32(handle);
- msg[3] = cpu_to_le32(0x00000000);
- msg[4] = cpu_to_le32(0x00000000);
- msg[5] = cpu_to_le32(0x00000000);
- msg[6] = cpu_to_le32(0x00000000);
- msg[7] = cpu_to_le32(0x00000780);
- msg[8] = cpu_to_le32(0x00000440);
- msg[9] = cpu_to_le32(0x00000000);
- msg[10] = cpu_to_le32(0x01b37000);
+ writel(cpu_to_le32(0x00000de4), &msg[0]);
+ writel(0x0, (void __iomem *)&msg[1]);
+ writel(cpu_to_le32(handle), &msg[2]);
+ writel(0x0, &msg[3]);
+ writel(0x0, &msg[4]);
+ writel(0x0, &msg[5]);
+ writel(0x0, &msg[6]);
+ writel(cpu_to_le32(0x00000780), &msg[7]);
+ writel(cpu_to_le32(0x00000440), &msg[8]);
+ writel(0x0, &msg[9]);
+ writel(cpu_to_le32(0x01b37000), &msg[10]);
for (i = 11; i < 1024; ++i)
- msg[i] = cpu_to_le32(0x0);
+ writel(0x0, &msg[i]);
r = radeon_uvd_send_msg(rdev, ring, addr, fence);
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
@@ -817,7 +817,7 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
RADEON_GPU_PAGE_SIZE;
- uint32_t *msg = rdev->uvd.cpu_addr + offs;
+ uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs);
uint64_t addr = rdev->uvd.gpu_addr + offs;
int r, i;
@@ -827,12 +827,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
return r;
/* stitch together an UVD destroy msg */
- msg[0] = cpu_to_le32(0x00000de4);
- msg[1] = cpu_to_le32(0x00000002);
- msg[2] = cpu_to_le32(handle);
- msg[3] = cpu_to_le32(0x00000000);
+ writel(cpu_to_le32(0x00000de4), &msg[0]);
+ writel(cpu_to_le32(0x00000002), &msg[1]);
+ writel(cpu_to_le32(handle), &msg[2]);
+ writel(0x0, &msg[3]);
for (i = 4; i < 1024; ++i)
- msg[i] = cpu_to_le32(0x0);
+ writel(0x0, &msg[i]);
r = radeon_uvd_send_msg(rdev, ring, addr, fence);
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 17390074277a..24ad12409120 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -223,16 +223,15 @@ static void rs780_preset_starting_fbdiv(struct radeon_device *rdev)
static void rs780_voltage_scaling_init(struct radeon_device *rdev)
{
struct igp_power_info *pi = rs780_get_pi(rdev);
- struct drm_device *dev = rdev->ddev;
u32 fv_throt_pwm_fb_div_range[3];
u32 fv_throt_pwm_range[4];
- if (dev->pdev->device == 0x9614) {
+ if (rdev->pdev->device == 0x9614) {
fv_throt_pwm_fb_div_range[0] = RS780D_FVTHROTPWMFBDIVRANGEREG0_DFLT;
fv_throt_pwm_fb_div_range[1] = RS780D_FVTHROTPWMFBDIVRANGEREG1_DFLT;
fv_throt_pwm_fb_div_range[2] = RS780D_FVTHROTPWMFBDIVRANGEREG2_DFLT;
- } else if ((dev->pdev->device == 0x9714) ||
- (dev->pdev->device == 0x9715)) {
+ } else if ((rdev->pdev->device == 0x9714) ||
+ (rdev->pdev->device == 0x9715)) {
fv_throt_pwm_fb_div_range[0] = RS880D_FVTHROTPWMFBDIVRANGEREG0_DFLT;
fv_throt_pwm_fb_div_range[1] = RS880D_FVTHROTPWMFBDIVRANGEREG1_DFLT;
fv_throt_pwm_fb_div_range[2] = RS880D_FVTHROTPWMFBDIVRANGEREG2_DFLT;
diff --git a/drivers/gpu/drm/rcar-du/rcar_cmm.c b/drivers/gpu/drm/rcar-du/rcar_cmm.c
index c578095b09a5..382d53f8a22e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_cmm.c
+++ b/drivers/gpu/drm/rcar-du/rcar_cmm.c
@@ -122,7 +122,7 @@ int rcar_cmm_enable(struct platform_device *pdev)
{
int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index b5fb941e0f53..ea7e39d03545 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -730,13 +730,10 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
*/
if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
- struct rcar_du_encoder *encoder =
- rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
+ struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
const struct drm_display_mode *mode =
&crtc->state->adjusted_mode;
- struct drm_bridge *bridge;
- bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
rcar_lvds_clk_enable(bridge, mode->clock * 1000);
}
@@ -764,15 +761,12 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
- struct rcar_du_encoder *encoder =
- rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
- struct drm_bridge *bridge;
+ struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
/*
* Disable the LVDS clock output, see
* rcar_du_crtc_atomic_enable().
*/
- bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
rcar_lvds_clk_disable(bridge);
}
@@ -1144,7 +1138,6 @@ static const struct drm_crtc_funcs crtc_funcs_gen3 = {
.set_crc_source = rcar_du_crtc_set_crc_source,
.verify_crc_source = rcar_du_crtc_verify_crc_source,
.get_crc_sources = rcar_du_crtc_get_crc_sources,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
/* -----------------------------------------------------------------------------
@@ -1257,7 +1250,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
else
primary = &rgrp->planes[swindex % 2].plane;
- ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, primary, NULL,
+ ret = drm_crtc_init_with_planes(&rcdu->ddev, crtc, primary, NULL,
rcdu->info->gen <= 2 ?
&crtc_funcs_gen2 : &crtc_funcs_gen3,
NULL);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 600056dff374..bfbff90588cb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -18,10 +18,11 @@
#include <linux/wait.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "rcar_du_drv.h"
@@ -527,14 +528,14 @@ static int rcar_du_pm_suspend(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
- return drm_mode_config_helper_suspend(rcdu->ddev);
+ return drm_mode_config_helper_suspend(&rcdu->ddev);
}
static int rcar_du_pm_resume(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
- return drm_mode_config_helper_resume(rcdu->ddev);
+ return drm_mode_config_helper_resume(&rcdu->ddev);
}
#endif
@@ -549,7 +550,7 @@ static const struct dev_pm_ops rcar_du_pm_ops = {
static int rcar_du_remove(struct platform_device *pdev)
{
struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
- struct drm_device *ddev = rcdu->ddev;
+ struct drm_device *ddev = &rcdu->ddev;
drm_dev_unregister(ddev);
@@ -563,14 +564,14 @@ static int rcar_du_remove(struct platform_device *pdev)
static int rcar_du_probe(struct platform_device *pdev)
{
struct rcar_du_device *rcdu;
- struct drm_device *ddev;
struct resource *mem;
int ret;
/* Allocate and initialize the R-Car device structure. */
- rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
- if (rcdu == NULL)
- return -ENOMEM;
+ rcdu = devm_drm_dev_alloc(&pdev->dev, &rcar_du_driver,
+ struct rcar_du_device, ddev);
+ if (IS_ERR(rcdu))
+ return PTR_ERR(rcdu);
rcdu->dev = &pdev->dev;
rcdu->info = of_device_get_match_data(rcdu->dev);
@@ -584,13 +585,6 @@ static int rcar_du_probe(struct platform_device *pdev)
return PTR_ERR(rcdu->mmio);
/* DRM/KMS objects */
- ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
- if (IS_ERR(ddev))
- return PTR_ERR(ddev);
-
- rcdu->ddev = ddev;
- ddev->dev_private = rcdu;
-
ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
@@ -599,25 +593,24 @@ static int rcar_du_probe(struct platform_device *pdev)
goto error;
}
- ddev->irq_enabled = 1;
+ rcdu->ddev.irq_enabled = 1;
/*
* Register the DRM device with the core and the connectors with
* sysfs.
*/
- ret = drm_dev_register(ddev, 0);
+ ret = drm_dev_register(&rcdu->ddev, 0);
if (ret)
goto error;
DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
- drm_fbdev_generic_setup(ddev, 32);
+ drm_fbdev_generic_setup(&rcdu->ddev, 32);
return 0;
error:
- rcar_du_remove(pdev);
-
+ drm_kms_helper_poll_fini(&rcdu->ddev);
return ret;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 61504c54e2ec..02ca2d0e1b55 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -13,6 +13,8 @@
#include <linux/kernel.h>
#include <linux/wait.h>
+#include <drm/drm_device.h>
+
#include "rcar_cmm.h"
#include "rcar_du_crtc.h"
#include "rcar_du_group.h"
@@ -20,10 +22,9 @@
struct clk;
struct device;
-struct drm_device;
+struct drm_bridge;
struct drm_property;
struct rcar_du_device;
-struct rcar_du_encoder;
#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */
#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(1) /* Has inputs from VSP1 */
@@ -71,6 +72,7 @@ struct rcar_du_device_info {
#define RCAR_DU_MAX_CRTCS 4
#define RCAR_DU_MAX_GROUPS DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2)
#define RCAR_DU_MAX_VSPS 4
+#define RCAR_DU_MAX_LVDS 2
struct rcar_du_device {
struct device *dev;
@@ -78,16 +80,15 @@ struct rcar_du_device {
void __iomem *mmio;
- struct drm_device *ddev;
+ struct drm_device ddev;
struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
unsigned int num_crtcs;
- struct rcar_du_encoder *encoders[RCAR_DU_OUTPUT_MAX];
-
struct rcar_du_group groups[RCAR_DU_MAX_GROUPS];
struct platform_device *cmms[RCAR_DU_MAX_CRTCS];
struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
+ struct drm_bridge *lvds[RCAR_DU_MAX_LVDS];
struct {
struct drm_property *colorkey;
@@ -98,6 +99,11 @@ struct rcar_du_device {
unsigned int vspd1_sink;
};
+static inline struct rcar_du_device *to_rcar_du_device(struct drm_device *dev)
+{
+ return container_of(dev, struct rcar_du_device, ddev);
+}
+
static inline bool rcar_du_has(struct rcar_du_device *rcdu,
unsigned int feature)
{
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index b0335da0c161..ba8c6038cd63 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -8,12 +8,13 @@
*/
#include <linux/export.h>
+#include <linux/slab.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
-#include <drm/drm_simple_kms_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
@@ -44,26 +45,25 @@ static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
return num_ports;
}
+static const struct drm_encoder_funcs rcar_du_encoder_funcs = {
+};
+
+static void rcar_du_encoder_release(struct drm_device *dev, void *res)
+{
+ struct rcar_du_encoder *renc = res;
+
+ drm_encoder_cleanup(&renc->base);
+ kfree(renc);
+}
+
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct device_node *enc_node)
{
struct rcar_du_encoder *renc;
- struct drm_encoder *encoder;
struct drm_bridge *bridge;
int ret;
- renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
- if (renc == NULL)
- return -ENOMEM;
-
- rcdu->encoders[output] = renc;
- renc->output = output;
- encoder = rcar_encoder_to_drm_encoder(renc);
-
- dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
- enc_node, output);
-
/*
* Locate the DRM bridge from the DT node. For the DPAD outputs, if the
* DT node has a single port, assume that it describes a panel and
@@ -74,57 +74,57 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
rcar_du_encoder_count_ports(enc_node) == 1) {
struct drm_panel *panel = of_drm_find_panel(enc_node);
- if (IS_ERR(panel)) {
- ret = PTR_ERR(panel);
- goto done;
- }
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel,
DRM_MODE_CONNECTOR_DPI);
- if (IS_ERR(bridge)) {
- ret = PTR_ERR(bridge);
- goto done;
- }
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
} else {
bridge = of_drm_find_bridge(enc_node);
- if (!bridge) {
- ret = -EPROBE_DEFER;
- goto done;
- }
+ if (!bridge)
+ return -EPROBE_DEFER;
+
+ if (output == RCAR_DU_OUTPUT_LVDS0 ||
+ output == RCAR_DU_OUTPUT_LVDS1)
+ rcdu->lvds[output - RCAR_DU_OUTPUT_LVDS0] = bridge;
}
/*
- * On Gen3 skip the LVDS1 output if the LVDS1 encoder is used as a
- * companion for LVDS0 in dual-link mode.
+ * Create and initialize the encoder. On Gen3 skip the LVDS1 output if
+ * the LVDS1 encoder is used as a companion for LVDS0 in dual-link
+ * mode.
*/
if (rcdu->info->gen >= 3 && output == RCAR_DU_OUTPUT_LVDS1) {
- if (rcar_lvds_dual_link(bridge)) {
- ret = -ENOLINK;
- goto done;
- }
+ if (rcar_lvds_dual_link(bridge))
+ return -ENOLINK;
}
- ret = drm_simple_encoder_init(rcdu->ddev, encoder,
- DRM_MODE_ENCODER_NONE);
- if (ret < 0)
- goto done;
+ renc = kzalloc(sizeof(*renc), GFP_KERNEL);
+ if (renc == NULL)
+ return -ENOMEM;
- /*
- * Attach the bridge to the encoder. The bridge will create the
- * connector.
- */
- ret = drm_bridge_attach(encoder, bridge, NULL, 0);
- if (ret) {
- drm_encoder_cleanup(encoder);
- return ret;
- }
+ renc->output = output;
+
+ dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
+ enc_node, output);
-done:
+ ret = drm_encoder_init(&rcdu->ddev, &renc->base, &rcar_du_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
if (ret < 0) {
- if (encoder->name)
- encoder->funcs->destroy(encoder);
- devm_kfree(rcdu->dev, renc);
+ kfree(renc);
+ return ret;
}
- return ret;
+ ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_encoder_release,
+ renc);
+ if (ret)
+ return ret;
+
+ /*
+ * Attach the bridge to the encoder. The bridge will create the
+ * connector.
+ */
+ return drm_bridge_attach(&renc->base, bridge, NULL, 0);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index df9be4524301..73560563fb31 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -22,8 +22,6 @@ struct rcar_du_encoder {
#define to_rcar_encoder(e) \
container_of(e, struct rcar_du_encoder, base)
-#define rcar_encoder_to_drm_encoder(e) (&(e)->base)
-
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct device_node *enc_node);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 72dda446355f..fdb8a0d127ad 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -14,6 +14,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -327,7 +328,7 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
unsigned int align;
@@ -349,7 +350,7 @@ static struct drm_framebuffer *
rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
const struct rcar_du_format_info *format;
unsigned int chroma_pitch;
unsigned int max_pitch;
@@ -421,7 +422,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
static int rcar_du_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
int ret;
ret = drm_atomic_helper_check(dev, state);
@@ -437,7 +438,7 @@ static int rcar_du_atomic_check(struct drm_device *dev,
static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
- struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
unsigned int i;
@@ -583,7 +584,7 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu)
* or enable source color keying (1).
*/
rcdu->props.colorkey =
- drm_property_create_range(rcdu->ddev, 0, "colorkey",
+ drm_property_create_range(&rcdu->ddev, 0, "colorkey",
0, 0x01ffffff);
if (rcdu->props.colorkey == NULL)
return -ENOMEM;
@@ -700,10 +701,10 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
int ret;
cmm = of_parse_phandle(np, "renesas,cmms", i);
- if (IS_ERR(cmm)) {
+ if (!cmm) {
dev_err(rcdu->dev,
"Failed to parse 'renesas,cmms' property\n");
- return PTR_ERR(cmm);
+ return -EINVAL;
}
if (!of_device_is_available(cmm)) {
@@ -713,10 +714,10 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
}
pdev = of_find_device_by_node(cmm);
- if (IS_ERR(pdev)) {
+ if (!pdev) {
dev_err(rcdu->dev, "No device found for CMM%u\n", i);
of_node_put(cmm);
- return PTR_ERR(pdev);
+ return -EINVAL;
}
of_node_put(cmm);
@@ -726,8 +727,12 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
* disabled: return 0 and let the DU continue probing.
*/
ret = rcar_cmm_init(pdev);
- if (ret)
+ if (ret) {
+ platform_device_put(pdev);
return ret == -ENODEV ? 0 : ret;
+ }
+
+ rcdu->cmms[i] = pdev;
/*
* Enforce suspend/resume ordering by making the CMM a provider
@@ -739,20 +744,27 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
"Failed to create device link to CMM%u\n", i);
return -EINVAL;
}
-
- rcdu->cmms[i] = pdev;
}
return 0;
}
+static void rcar_du_modeset_cleanup(struct drm_device *dev, void *res)
+{
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rcdu->cmms); ++i)
+ platform_device_put(rcdu->cmms[i]);
+}
+
int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
static const unsigned int mmio_offsets[] = {
DU0_REG_OFFSET, DU2_REG_OFFSET
};
- struct drm_device *dev = rcdu->ddev;
+ struct drm_device *dev = &rcdu->ddev;
struct drm_encoder *encoder;
unsigned int dpad0_sources;
unsigned int num_encoders;
@@ -766,6 +778,10 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
if (ret)
return ret;
+ ret = drmm_add_action(&rcdu->ddev, rcar_du_modeset_cleanup, NULL);
+ if (ret)
+ return ret;
+
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.normalize_zpos = true;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index a0021fc25b27..02e5f11f38eb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -128,7 +128,7 @@ static int rcar_du_plane_hwalloc(struct rcar_du_plane *plane,
int rcar_du_atomic_check_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
bool needs_realloc = false;
@@ -773,9 +773,9 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
plane->group = rgrp;
- ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
- &rcar_du_plane_funcs, formats,
- ARRAY_SIZE(formats),
+ ret = drm_universal_plane_init(&rcdu->ddev, &plane->plane,
+ crtcs, &rcar_du_plane_funcs,
+ formats, ARRAY_SIZE(formats),
NULL, type, NULL);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index f6a69aa116e6..53221d8473c1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -21,6 +21,7 @@
#include <linux/dma-mapping.h>
#include <linux/of_platform.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/vsp1.h>
@@ -344,6 +345,15 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
static void rcar_du_vsp_cleanup(struct drm_device *dev, void *res)
{
struct rcar_du_vsp *vsp = res;
+ unsigned int i;
+
+ for (i = 0; i < vsp->num_planes; ++i) {
+ struct rcar_du_vsp_plane *plane = &vsp->planes[i];
+
+ drm_plane_cleanup(&plane->plane);
+ }
+
+ kfree(vsp->planes);
put_device(vsp->vsp);
}
@@ -354,6 +364,7 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
struct rcar_du_device *rcdu = vsp->dev;
struct platform_device *pdev;
unsigned int num_crtcs = hweight32(crtcs);
+ unsigned int num_planes;
unsigned int i;
int ret;
@@ -364,7 +375,7 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
vsp->vsp = &pdev->dev;
- ret = drmm_add_action(rcdu->ddev, rcar_du_vsp_cleanup, vsp);
+ ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_vsp_cleanup, vsp);
if (ret < 0)
return ret;
@@ -376,14 +387,13 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
* The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to
* 4 RPFs.
*/
- vsp->num_planes = rcdu->info->gen >= 3 ? 5 : 4;
+ num_planes = rcdu->info->gen >= 3 ? 5 : 4;
- vsp->planes = devm_kcalloc(rcdu->dev, vsp->num_planes,
- sizeof(*vsp->planes), GFP_KERNEL);
+ vsp->planes = kcalloc(num_planes, sizeof(*vsp->planes), GFP_KERNEL);
if (!vsp->planes)
return -ENOMEM;
- for (i = 0; i < vsp->num_planes; ++i) {
+ for (i = 0; i < num_planes; ++i) {
enum drm_plane_type type = i < num_crtcs
? DRM_PLANE_TYPE_PRIMARY
: DRM_PLANE_TYPE_OVERLAY;
@@ -392,8 +402,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
plane->vsp = vsp;
plane->index = i;
- ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
- &rcar_du_vsp_plane_funcs,
+ ret = drm_universal_plane_init(&rcdu->ddev, &plane->plane,
+ crtcs, &rcar_du_vsp_plane_funcs,
rcar_du_vsp_formats,
ARRAY_SIZE(rcar_du_vsp_formats),
NULL, type, NULL);
@@ -409,8 +419,10 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
} else {
drm_plane_create_alpha_property(&plane->plane);
drm_plane_create_zpos_property(&plane->plane, 1, 1,
- vsp->num_planes - 1);
+ num_planes - 1);
}
+
+ vsp->num_planes++;
}
return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
index 04efa78d70b6..c79d1259e49b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -204,7 +204,7 @@ int rcar_du_writeback_init(struct rcar_du_device *rcdu,
drm_connector_helper_add(&wb_conn->base,
&rcar_du_wb_conn_helper_funcs);
- return drm_writeback_connector_init(rcdu->ddev, wb_conn,
+ return drm_writeback_connector_init(&rcdu->ddev, wb_conn,
&rcar_du_wb_conn_funcs,
&rcar_du_wb_enc_helper_funcs,
writeback_formats,
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 310aa1546893..cb25c0e8fc9b 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -49,7 +49,7 @@ config ROCKCHIP_DW_MIPI_DSI
select GENERIC_PHY_MIPI_DPHY
help
This selects support for Rockchip SoC specific extensions
- for the Synopsys DesignWare HDMI driver. If you want to
+ for the Synopsys DesignWare dsi driver. If you want to
enable MIPI DSI on RK3288 or RK3399 based SoC, you should
select this option.
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index e84325e56d98..24a71091759c 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -1089,7 +1089,7 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(dsi->grf_regmap)) {
- DRM_DEV_ERROR(dsi->dev, "Unable to get rockchip,grf\n");
+ DRM_DEV_ERROR(dev, "Unable to get rockchip,grf\n");
return PTR_ERR(dsi->grf_regmap);
}
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 23de359a1dec..830bdd5e9b7c 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -202,7 +202,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
} else if (PTR_ERR(hdmi->vpll_clk) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (IS_ERR(hdmi->vpll_clk)) {
- DRM_DEV_ERROR(hdmi->dev, "failed to get grf clock\n");
+ DRM_DEV_ERROR(hdmi->dev, "failed to get vpll clock\n");
return PTR_ERR(hdmi->vpll_clk);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index d1e05482641b..8d15cabdcb02 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1643,7 +1643,6 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
.disable_vblank = vop_crtc_disable_vblank,
.set_crc_source = vop_crtc_set_crc_source,
.verify_crc_source = vop_crtc_verify_crc_source,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index b498d474ef9e..92637b70c9bf 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -60,8 +60,6 @@
#define to_drm_sched_job(sched_job) \
container_of((sched_job), struct drm_sched_job, queue_node)
-static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
-
/**
* drm_sched_rq_init - initialize a given run queue struct
*
@@ -164,6 +162,40 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
}
/**
+ * drm_sched_job_done - complete a job
+ * @s_job: pointer to the job which is done
+ *
+ * Finish the job's fence and wake up the worker thread.
+ */
+static void drm_sched_job_done(struct drm_sched_job *s_job)
+{
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+ struct drm_gpu_scheduler *sched = s_fence->sched;
+
+ atomic_dec(&sched->hw_rq_count);
+ atomic_dec(&sched->score);
+
+ trace_drm_sched_process_job(s_fence);
+
+ dma_fence_get(&s_fence->finished);
+ drm_sched_fence_finished(s_fence);
+ dma_fence_put(&s_fence->finished);
+ wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * drm_sched_job_done_cb - the callback for a done job
+ * @f: fence
+ * @cb: fence callbacks
+ */
+static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
+
+ drm_sched_job_done(s_job);
+}
+
+/**
* drm_sched_dependency_optimized
*
* @fence: the dependency fence
@@ -199,7 +231,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- !list_empty(&sched->ring_mirror_list))
+ !list_empty(&sched->pending_list))
schedule_delayed_work(&sched->work_tdr, sched->timeout);
}
@@ -259,7 +291,7 @@ void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
{
spin_lock(&sched->job_list_lock);
- if (list_empty(&sched->ring_mirror_list))
+ if (list_empty(&sched->pending_list))
cancel_delayed_work(&sched->work_tdr);
else
mod_delayed_work(system_wq, &sched->work_tdr, remaining);
@@ -273,7 +305,7 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
struct drm_gpu_scheduler *sched = s_job->sched;
spin_lock(&sched->job_list_lock);
- list_add_tail(&s_job->node, &sched->ring_mirror_list);
+ list_add_tail(&s_job->list, &sched->pending_list);
drm_sched_start_timeout(sched);
spin_unlock(&sched->job_list_lock);
}
@@ -287,8 +319,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
spin_lock(&sched->job_list_lock);
- job = list_first_entry_or_null(&sched->ring_mirror_list,
- struct drm_sched_job, node);
+ job = list_first_entry_or_null(&sched->pending_list,
+ struct drm_sched_job, list);
if (job) {
/*
@@ -296,7 +328,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
* drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
* is parked at which point it's safe.
*/
- list_del_init(&job->node);
+ list_del_init(&job->list);
spin_unlock(&sched->job_list_lock);
job->sched->ops->timedout_job(job);
@@ -372,7 +404,7 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
* Stop the scheduler and also removes and frees all completed jobs.
* Note: bad job will not be freed as it might be used later and so it's
* callers responsibility to release it manually if it's not part of the
- * mirror list any more.
+ * pending list any more.
*
*/
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
@@ -393,26 +425,27 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
* Add at the head of the queue to reflect it was the earliest
* job extracted.
*/
- list_add(&bad->node, &sched->ring_mirror_list);
+ list_add(&bad->list, &sched->pending_list);
/*
* Iterate the job list from later to earlier one and either deactive
- * their HW callbacks or remove them from mirror list if they already
+ * their HW callbacks or remove them from pending list if they already
* signaled.
* This iteration is thread safe as sched thread is stopped.
*/
- list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
+ list) {
if (s_job->s_fence->parent &&
dma_fence_remove_callback(s_job->s_fence->parent,
&s_job->cb)) {
atomic_dec(&sched->hw_rq_count);
} else {
/*
- * remove job from ring_mirror_list.
+ * remove job from pending_list.
* Locking here is for concurrent resume timeout
*/
spin_lock(&sched->job_list_lock);
- list_del_init(&s_job->node);
+ list_del_init(&s_job->list);
spin_unlock(&sched->job_list_lock);
/*
@@ -463,7 +496,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
* so no new jobs are being inserted or removed. Also concurrent
* GPU recovers can't run in parallel.
*/
- list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct dma_fence *fence = s_job->s_fence->parent;
atomic_inc(&sched->hw_rq_count);
@@ -473,14 +506,14 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
if (fence) {
r = dma_fence_add_callback(fence, &s_job->cb,
- drm_sched_process_job);
+ drm_sched_job_done_cb);
if (r == -ENOENT)
- drm_sched_process_job(fence, &s_job->cb);
+ drm_sched_job_done(s_job);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
} else
- drm_sched_process_job(NULL, &s_job->cb);
+ drm_sched_job_done(s_job);
}
if (full_recovery) {
@@ -494,7 +527,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
EXPORT_SYMBOL(drm_sched_start);
/**
- * drm_sched_resubmit_jobs - helper to relunch job from mirror ring list
+ * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
*
* @sched: scheduler instance
*
@@ -506,7 +539,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
bool found_guilty = false;
struct dma_fence *fence;
- list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct drm_sched_fence *s_fence = s_job->s_fence;
if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
@@ -566,7 +599,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
return -ENOMEM;
job->id = atomic64_inc_return(&sched->job_id_count);
- INIT_LIST_HEAD(&job->node);
+ INIT_LIST_HEAD(&job->list);
return 0;
}
@@ -636,36 +669,11 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
}
/**
- * drm_sched_process_job - process a job
- *
- * @f: fence
- * @cb: fence callbacks
- *
- * Called after job has finished execution.
- */
-static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
-{
- struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
- struct drm_sched_fence *s_fence = s_job->s_fence;
- struct drm_gpu_scheduler *sched = s_fence->sched;
-
- atomic_dec(&sched->hw_rq_count);
- atomic_dec(&sched->score);
-
- trace_drm_sched_process_job(s_fence);
-
- dma_fence_get(&s_fence->finished);
- drm_sched_fence_finished(s_fence);
- dma_fence_put(&s_fence->finished);
- wake_up_interruptible(&sched->wake_up_worker);
-}
-
-/**
* drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
*
- * Returns the next finished job from the mirror list (if there is one)
+ * Returns the next finished job from the pending list (if there is one)
* ready for it to be destroyed.
*/
static struct drm_sched_job *
@@ -675,7 +683,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
/*
* Don't destroy jobs while the timeout worker is running OR thread
- * is being parked and hence assumed to not touch ring_mirror_list
+ * is being parked and hence assumed to not touch pending_list
*/
if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
!cancel_delayed_work(&sched->work_tdr)) ||
@@ -684,12 +692,12 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
spin_lock(&sched->job_list_lock);
- job = list_first_entry_or_null(&sched->ring_mirror_list,
- struct drm_sched_job, node);
+ job = list_first_entry_or_null(&sched->pending_list,
+ struct drm_sched_job, list);
if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
- /* remove job from ring_mirror_list */
- list_del_init(&job->node);
+ /* remove job from pending_list */
+ list_del_init(&job->list);
} else {
job = NULL;
/* queue timeout for next job */
@@ -809,9 +817,9 @@ static int drm_sched_main(void *param)
if (!IS_ERR_OR_NULL(fence)) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &sched_job->cb,
- drm_sched_process_job);
+ drm_sched_job_done_cb);
if (r == -ENOENT)
- drm_sched_process_job(fence, &sched_job->cb);
+ drm_sched_job_done(sched_job);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
@@ -820,7 +828,7 @@ static int drm_sched_main(void *param)
if (IS_ERR(fence))
dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
- drm_sched_process_job(NULL, &sched_job->cb);
+ drm_sched_job_done(sched_job);
}
wake_up(&sched->job_scheduled);
@@ -858,7 +866,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
- INIT_LIST_HEAD(&sched->ring_mirror_list);
+ INIT_LIST_HEAD(&sched->pending_list);
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
@@ -891,6 +899,9 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
if (sched->thread)
kthread_stop(sched->thread);
+ /* Confirm no work left behind accessing device structures */
+ cancel_delayed_work_sync(&sched->work_tdr);
+
sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);
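
A minimal sketch of the job-completion path after this change, for orientation; drm_sched_job_done()/drm_sched_job_done_cb() are the helpers added in the hunks above, while sketch_arm_job() itself is hypothetical and only illustrates the flow:

/*
 * Sketch only: how a job's hardware fence is wired to the new helpers.
 */
static void sketch_arm_job(struct drm_gpu_scheduler *sched,
			   struct drm_sched_job *s_job,
			   struct dma_fence *hw_fence)
{
	int r;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list); /* was ->node on ring_mirror_list */
	spin_unlock(&sched->job_list_lock);

	r = dma_fence_add_callback(hw_fence, &s_job->cb, drm_sched_job_done_cb);
	if (r == -ENOENT)
		drm_sched_job_done(s_job); /* fence already signaled, complete inline */
	else if (r)
		DRM_ERROR("fence add callback failed (%d)\n", r);
}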
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index a98057431023..7476301d7142 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -330,13 +330,6 @@ static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
.atomic_disable = sti_cursor_atomic_disable,
};
-static void sti_cursor_destroy(struct drm_plane *drm_plane)
-{
- DRM_DEBUG_DRIVER("\n");
-
- drm_plane_cleanup(drm_plane);
-}
-
static int sti_cursor_late_register(struct drm_plane *drm_plane)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
@@ -350,7 +343,7 @@ static int sti_cursor_late_register(struct drm_plane *drm_plane)
static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = sti_cursor_destroy,
+ .destroy = drm_plane_cleanup,
.reset = sti_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 2d5a2b5b78b8..2f4a34f14d33 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -884,13 +884,6 @@ static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
.atomic_disable = sti_gdp_atomic_disable,
};
-static void sti_gdp_destroy(struct drm_plane *drm_plane)
-{
- DRM_DEBUG_DRIVER("\n");
-
- drm_plane_cleanup(drm_plane);
-}
-
static int sti_gdp_late_register(struct drm_plane *drm_plane)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
@@ -902,7 +895,7 @@ static int sti_gdp_late_register(struct drm_plane *drm_plane)
static const struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = sti_gdp_destroy,
+ .destroy = drm_plane_cleanup,
.reset = sti_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 5a4e12194a77..62f824cd5f21 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1262,13 +1262,6 @@ static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
.atomic_disable = sti_hqvdp_atomic_disable,
};
-static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
-{
- DRM_DEBUG_DRIVER("\n");
-
- drm_plane_cleanup(drm_plane);
-}
-
static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
@@ -1282,7 +1275,7 @@ static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = sti_hqvdp_destroy,
+ .destroy = drm_plane_cleanup,
.reset = sti_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 3980677435cb..7812094f93d6 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -713,7 +713,6 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.enable_vblank = ltdc_crtc_enable_vblank,
.disable_vblank = ltdc_crtc_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
/*
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index eaaf5d70e352..6b9af4c08cd6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -569,30 +569,13 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
- /*
- * On A20 and similar SoCs, the only way to achieve Positive Edge
- * (Rising Edge), is setting dclk clock phase to 2/3(240°).
- * By default TCON works in Negative Edge(Falling Edge),
- * this is why phase is set to 0 in that case.
- * Unfortunately there's no way to logically invert dclk through
- * IO_POL register.
- * The only acceptable way to work, triple checked with scope,
- * is using clock phase set to 0° for Negative Edge and set to 240°
- * for Positive Edge.
- * On A33 and similar SoCs there would be a 90° phase option,
- * but it divides also dclk by 2.
- * Following code is a way to avoid quirks all around TCON
- * and DOTCLOCK drivers.
- */
- if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
- clk_set_phase(tcon->dclk, 240);
-
if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
- clk_set_phase(tcon->dclk, 0);
+ val |= SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE;
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
SUN4I_TCON0_IO_POL_HSYNC_POSITIVE |
SUN4I_TCON0_IO_POL_VSYNC_POSITIVE |
+ SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE |
SUN4I_TCON0_IO_POL_DE_NEGATIVE,
val);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index cfbf4e6c1679..c5ac1b02482c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -113,6 +113,7 @@
#define SUN4I_TCON0_IO_POL_REG 0x88
#define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28)
#define SUN4I_TCON0_IO_POL_DE_NEGATIVE BIT(27)
+#define SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE BIT(26)
#define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25)
#define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24)
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c
index 781955dd4995..9bd62de0c288 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.c
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.c
@@ -46,33 +46,6 @@ static const u32 yuv2rgb[2][2][12] = {
},
};
-static const u32 yvu2rgb[2][2][12] = {
- [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
- [DRM_COLOR_YCBCR_BT601] = {
- 0x000004A8, 0x00000662, 0x00000000, 0xFFFC8451,
- 0x000004A8, 0xFFFFFCC0, 0xFFFFFE6F, 0x00021E4D,
- 0x000004A8, 0x00000000, 0x00000811, 0xFFFBACA9,
- },
- [DRM_COLOR_YCBCR_BT709] = {
- 0x000004A8, 0x0000072B, 0x00000000, 0xFFFC1F99,
- 0x000004A8, 0xFFFFFDDF, 0xFFFFFF26, 0x00013383,
- 0x000004A8, 0x00000000, 0x00000873, 0xFFFB7BEF,
- }
- },
- [DRM_COLOR_YCBCR_FULL_RANGE] = {
- [DRM_COLOR_YCBCR_BT601] = {
- 0x00000400, 0x0000059B, 0x00000000, 0xFFFD322E,
- 0x00000400, 0xFFFFFD25, 0xFFFFFEA0, 0x00021DD5,
- 0x00000400, 0x00000000, 0x00000716, 0xFFFC74BD,
- },
- [DRM_COLOR_YCBCR_BT709] = {
- 0x00000400, 0x0000064C, 0x00000000, 0xFFFCD9B4,
- 0x00000400, 0xFFFFFE21, 0xFFFFFF41, 0x00014F96,
- 0x00000400, 0x00000000, 0x0000076C, 0xFFFC49EF,
- }
- },
-};
-
/*
* DE3 has a bit different CSC units. Factors are in two's complement format.
* First three factors in a row are multiplication factors which have 17 bits
@@ -96,7 +69,7 @@ static const u32 yvu2rgb[2][2][12] = {
* c20 c21 c22 [d2 const2]
*/
-static const u32 yuv2rgb_de3[2][2][12] = {
+static const u32 yuv2rgb_de3[2][3][12] = {
[DRM_COLOR_YCBCR_LIMITED_RANGE] = {
[DRM_COLOR_YCBCR_BT601] = {
0x0002542A, 0x00000000, 0x0003312A, 0xFFC00000,
@@ -107,6 +80,11 @@ static const u32 yuv2rgb_de3[2][2][12] = {
0x0002542A, 0x00000000, 0x000395E2, 0xFFC00000,
0x0002542A, 0xFFFF92D2, 0xFFFEEF27, 0xFE000000,
0x0002542A, 0x0004398C, 0x00000000, 0xFE000000,
+ },
+ [DRM_COLOR_YCBCR_BT2020] = {
+ 0x0002542A, 0x00000000, 0x00035B7B, 0xFFC00000,
+ 0x0002542A, 0xFFFFA017, 0xFFFEB2FC, 0xFE000000,
+ 0x0002542A, 0x00044896, 0x00000000, 0xFE000000,
}
},
[DRM_COLOR_YCBCR_FULL_RANGE] = {
@@ -119,33 +97,11 @@ static const u32 yuv2rgb_de3[2][2][12] = {
0x00020000, 0x00000000, 0x0003264C, 0x00000000,
0x00020000, 0xFFFFA018, 0xFFFF1053, 0xFE000000,
0x00020000, 0x0003B611, 0x00000000, 0xFE000000,
- }
- },
-};
-
-static const u32 yvu2rgb_de3[2][2][12] = {
- [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
- [DRM_COLOR_YCBCR_BT601] = {
- 0x0002542A, 0x0003312A, 0x00000000, 0xFFC00000,
- 0x0002542A, 0xFFFE5FC3, 0xFFFF376B, 0xFE000000,
- 0x0002542A, 0x00000000, 0x000408D2, 0xFE000000,
},
- [DRM_COLOR_YCBCR_BT709] = {
- 0x0002542A, 0x000395E2, 0x00000000, 0xFFC00000,
- 0x0002542A, 0xFFFEEF27, 0xFFFF92D2, 0xFE000000,
- 0x0002542A, 0x00000000, 0x0004398C, 0xFE000000,
- }
- },
- [DRM_COLOR_YCBCR_FULL_RANGE] = {
- [DRM_COLOR_YCBCR_BT601] = {
- 0x00020000, 0x0002CDD2, 0x00000000, 0x00000000,
- 0x00020000, 0xFFFE925D, 0xFFFF4FCE, 0xFE000000,
- 0x00020000, 0x00000000, 0x00038B43, 0xFE000000,
- },
- [DRM_COLOR_YCBCR_BT709] = {
- 0x00020000, 0x0003264C, 0x00000000, 0x00000000,
- 0x00020000, 0xFFFF1053, 0xFFFFA018, 0xFE000000,
- 0x00020000, 0x00000000, 0x0003B611, 0xFE000000,
+ [DRM_COLOR_YCBCR_BT2020] = {
+ 0x00020000, 0x00000000, 0x0002F2FE, 0x00000000,
+ 0x00020000, 0xFFFFABC0, 0xFFFEDB78, 0xFE000000,
+ 0x00020000, 0x0003C346, 0x00000000, 0xFE000000,
}
},
};
@@ -157,21 +113,30 @@ static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
{
const u32 *table;
u32 base_reg;
+ int i;
+
+ table = yuv2rgb[range][encoding];
switch (mode) {
case SUN8I_CSC_MODE_YUV2RGB:
- table = yuv2rgb[range][encoding];
+ base_reg = SUN8I_CSC_COEFF(base, 0);
+ regmap_bulk_write(map, base_reg, table, 12);
break;
case SUN8I_CSC_MODE_YVU2RGB:
- table = yvu2rgb[range][encoding];
+ for (i = 0; i < 12; i++) {
+ if ((i & 3) == 1)
+ base_reg = SUN8I_CSC_COEFF(base, i + 1);
+ else if ((i & 3) == 2)
+ base_reg = SUN8I_CSC_COEFF(base, i - 1);
+ else
+ base_reg = SUN8I_CSC_COEFF(base, i);
+ regmap_write(map, base_reg, table[i]);
+ }
break;
default:
DRM_WARN("Wrong CSC mode specified.\n");
return;
}
-
- base_reg = SUN8I_CSC_COEFF(base, 0);
- regmap_bulk_write(map, base_reg, table, 12);
}
static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
@@ -180,22 +145,36 @@ static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
enum drm_color_range range)
{
const u32 *table;
- u32 base_reg;
+ u32 addr;
+ int i;
+
+ table = yuv2rgb_de3[range][encoding];
switch (mode) {
case SUN8I_CSC_MODE_YUV2RGB:
- table = yuv2rgb_de3[range][encoding];
+ addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0);
+ regmap_bulk_write(map, addr, table, 12);
break;
case SUN8I_CSC_MODE_YVU2RGB:
- table = yvu2rgb_de3[range][encoding];
+ for (i = 0; i < 12; i++) {
+ if ((i & 3) == 1)
+ addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
+ layer,
+ i + 1);
+ else if ((i & 3) == 2)
+ addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
+ layer,
+ i - 1);
+ else
+ addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
+ layer, i);
+ regmap_write(map, addr, table[i]);
+ }
break;
default:
DRM_WARN("Wrong CSC mode specified.\n");
return;
}
-
- base_reg = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0, 0);
- regmap_bulk_write(map, base_reg, table, 12);
}
static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable)
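
The YVU path above no longer needs its own coefficient tables: the YUV table is reused and the two chroma entries of every four-entry row are swapped while the registers are programmed. A small sketch of that index mapping (sketch_yvu_reg_index() is a hypothetical name):

/*
 * Sketch: register slot that table entry i is written to in YVU mode.
 * Entries 1 and 2 of each 4-entry row trade places; the first factor
 * and the constant stay where they are.
 */
static u32 sketch_yvu_reg_index(u32 i)
{
	if ((i & 3) == 1)
		return i + 1;
	if ((i & 3) == 2)
		return i - 1;
	return i;
}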
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 7576b523fdbb..145833a9d82d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -50,10 +50,8 @@
#define SUN8I_MIXER_BLEND_CK_MIN(base, x) ((base) + 0xe0 + 0x04 * (x))
#define SUN8I_MIXER_BLEND_OUTCTL(base) ((base) + 0xfc)
#define SUN50I_MIXER_BLEND_CSC_CTL(base) ((base) + 0x100)
-#define SUN50I_MIXER_BLEND_CSC_COEFF(base, layer, x, y) \
- ((base) + 0x110 + (layer) * 0x30 + (x) * 0x10 + 4 * (y))
-#define SUN50I_MIXER_BLEND_CSC_CONST(base, layer, i) \
- ((base) + 0x110 + (layer) * 0x30 + (i) * 0x10 + 0x0c)
+#define SUN50I_MIXER_BLEND_CSC_COEFF(base, layer, x) \
+ ((base) + 0x110 + (layer) * 0x30 + (x) * 4)
#define SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK GENMASK(12, 8)
#define SUN8I_MIXER_BLEND_PIPE_CTL_EN(pipe) BIT(8 + pipe)
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 76393fc976fe..8cc294a9969d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -543,6 +543,8 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709);
+ if (mixer->cfg->is_de3)
+ supported_encodings |= BIT(DRM_COLOR_YCBCR_BT2020);
supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE);
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index ab699bf0ac5c..58c185c299f4 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -56,7 +56,7 @@ static const struct file_operations tdfx_driver_fops = {
.llseek = noop_llseek,
};
-static struct drm_driver driver = {
+static const struct drm_driver driver = {
.driver_features = DRIVER_LEGACY,
.fops = &tdfx_driver_fops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 85dd7131553a..0ae3a025efe9 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2186,7 +2186,7 @@ static int tegra_dc_runtime_resume(struct host1x_client *client)
struct device *dev = client->dev;
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
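
The same pm_runtime_get_sync() to pm_runtime_resume_and_get() conversion repeats in the dsi, hdmi, hub, sor and vic hunks below; the newer helper drops the usage-count reference itself when the resume fails, so the error path can return directly. A hedged sketch of the resulting pattern:

/* Sketch of the pattern used in these hunks. */
err = pm_runtime_resume_and_get(dev);
if (err < 0) {
	dev_err(dev, "failed to get runtime PM: %d\n", err);
	return err; /* reference already dropped by the helper on failure */
}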
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e45c8414e2a3..e9ce7d6992d2 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1301,8 +1301,10 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra30-hdmi", },
{ .compatible = "nvidia,tegra30-gr2d", },
{ .compatible = "nvidia,tegra30-gr3d", },
+ { .compatible = "nvidia,tegra114-dc", },
{ .compatible = "nvidia,tegra114-dsi", },
{ .compatible = "nvidia,tegra114-hdmi", },
+ { .compatible = "nvidia,tegra114-gr2d", },
{ .compatible = "nvidia,tegra114-gr3d", },
{ .compatible = "nvidia,tegra124-dc", },
{ .compatible = "nvidia,tegra124-sor", },
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 5691ef1b0e58..f46d377f0c30 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1111,7 +1111,7 @@ static int tegra_dsi_runtime_resume(struct host1x_client *client)
struct device *dev = client->dev;
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index 56edef06c48e..223ab2ceb7e6 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -72,7 +72,7 @@ static int falcon_parse_firmware_image(struct falcon *falcon)
struct falcon_fw_os_header_v1 *os;
/* endian problems would show up right here */
- if (bin->magic != PCI_VENDOR_ID_NVIDIA) {
+ if (bin->magic != PCI_VENDOR_ID_NVIDIA && bin->magic != 0x10fe) {
dev_err(falcon->dev, "incorrect firmware magic\n");
return -EINVAL;
}
@@ -178,9 +178,10 @@ int falcon_boot(struct falcon *falcon)
falcon->firmware.data.offset + offset,
offset, FALCON_MEMORY_DATA);
- /* copy the first code segment into Falcon internal memory */
- falcon_copy_chunk(falcon, falcon->firmware.code.offset,
- 0, FALCON_MEMORY_IMEM);
+ /* copy the code segment into Falcon internal memory */
+ for (offset = 0; offset < falcon->firmware.code.size; offset += 256)
+ falcon_copy_chunk(falcon, falcon->firmware.code.offset + offset,
+ offset, FALCON_MEMORY_IMEM);
/* setup falcon interrupts */
falcon_writel(falcon, FALCON_IRQMSET_EXT(0xff) |
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 1a0d3ba6e525..adbe2ddcda19 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -161,9 +161,14 @@ static const struct gr2d_soc tegra30_gr2d_soc = {
.version = 0x30,
};
+static const struct gr2d_soc tegra114_gr2d_soc = {
+ .version = 0x35,
+};
+
static const struct of_device_id gr2d_match[] = {
- { .compatible = "nvidia,tegra30-gr2d", .data = &tegra20_gr2d_soc },
- { .compatible = "nvidia,tegra20-gr2d", .data = &tegra30_gr2d_soc },
+ { .compatible = "nvidia,tegra114-gr2d", .data = &tegra114_gr2d_soc },
+ { .compatible = "nvidia,tegra30-gr2d", .data = &tegra30_gr2d_soc },
+ { .compatible = "nvidia,tegra20-gr2d", .data = &tegra20_gr2d_soc },
{ },
};
MODULE_DEVICE_TABLE(of, gr2d_match);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index d09a24931c87..e5d2a4026028 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1510,7 +1510,7 @@ static int tegra_hdmi_runtime_resume(struct host1x_client *client)
struct device *dev = client->dev;
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 22a03f7ffdc1..5ce771cba133 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -789,7 +789,7 @@ static int tegra_display_hub_runtime_resume(struct host1x_client *client)
unsigned int i;
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index cc2aa2308a51..f02a035dda45 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -3218,7 +3218,7 @@ static int tegra_sor_runtime_resume(struct host1x_client *client)
struct device *dev = client->dev;
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index ade56b860cf9..77e128832920 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -117,7 +117,19 @@ static int vic_boot(struct vic *vic)
if (spec->num_ids > 0) {
value = spec->ids[0] & 0xffff;
+ /*
+ * STREAMID0 is used for input/output buffers.
+ * Initialize it to SID_VIC in case context isolation
+ * is not enabled, and SID_VIC is used for both firmware
+ * and data buffers.
+ *
+ * If context isolation is enabled, it will be
+ * overridden by the SETSTREAMID opcode as part of
+ * each job.
+ */
vic_writel(vic, value, VIC_THI_STREAMID0);
+
+ /* STREAMID1 is used for firmware loading. */
vic_writel(vic, value, VIC_THI_STREAMID1);
}
}
@@ -135,16 +147,21 @@ static int vic_boot(struct vic *vic)
hdr = vic->falcon.firmware.virt;
fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
- hdr = vic->falcon.firmware.virt +
- *(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
- fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
falcon_execute_method(&vic->falcon, VIC_SET_APPLICATION_ID, 1);
- falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
- fce_ucode_size);
- falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
- (vic->falcon.firmware.iova + fce_bin_data_offset)
- >> 8);
+
+ /* Old VIC firmware needs kernel help with setting up FCE microcode. */
+ if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
+ hdr = vic->falcon.firmware.virt +
+ *(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
+ fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
+
+ falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
+ fce_ucode_size);
+ falcon_execute_method(
+ &vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
+ (vic->falcon.firmware.iova + fce_bin_data_offset) >> 8);
+ }
err = falcon_wait_idle(&vic->falcon);
if (err < 0) {
@@ -314,7 +331,7 @@ static int vic_open_channel(struct tegra_drm_client *client,
struct vic *vic = to_vic(client);
int err;
- err = pm_runtime_get_sync(vic->dev);
+ err = pm_runtime_resume_and_get(vic->dev);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index 561c49d8657a..a043e602199e 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -602,7 +602,6 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
drm_mode_config_reset(dev);
- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
ret = drm_dev_register(dev, 0);
if (ret)
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 03c86628e4ac..8f9fa4188897 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -32,7 +32,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/agp_backend.h>
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9a03c7834b1e..b65f4b12f986 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -31,7 +31,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
@@ -43,6 +42,8 @@
#include <linux/atomic.h>
#include <linux/dma-resv.h>
+#include "ttm_module.h"
+
static void ttm_bo_global_kobj_release(struct kobject *kobj);
/*
@@ -71,9 +72,9 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man;
int i, mem_type;
- drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
- bo, bo->mem.num_pages, bo->mem.size >> 10,
- bo->mem.size >> 20);
+ drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
+ bo, bo->mem.num_pages, bo->base.size >> 10,
+ bo->base.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
mem_type = placement->placement[i].mem_type;
drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
@@ -109,40 +110,14 @@ static struct kobj_type ttm_bo_glob_kobj_type = {
.default_attrs = ttm_bo_global_attrs
};
-static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
- struct ttm_resource *mem)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_resource_manager *man;
-
- if (!list_empty(&bo->lru) || bo->pin_count)
- return;
-
- man = ttm_manager_type(bdev, mem->mem_type);
- list_add_tail(&bo->lru, &man->lru[bo->priority]);
-
- if (man->use_tt && bo->ttm &&
- !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
- TTM_PAGE_FLAG_SWAPPED))) {
- list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
- }
-}
-
static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- bool notify = false;
- if (!list_empty(&bo->swap)) {
- list_del_init(&bo->swap);
- notify = true;
- }
- if (!list_empty(&bo->lru)) {
- list_del_init(&bo->lru);
- notify = true;
- }
+ list_del_init(&bo->swap);
+ list_del_init(&bo->lru);
- if (notify && bdev->driver->del_from_lru_notify)
+ if (bdev->driver->del_from_lru_notify)
bdev->driver->del_from_lru_notify(bo);
}
@@ -155,12 +130,32 @@ static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
}
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
+ struct ttm_resource *mem,
struct ttm_lru_bulk_move *bulk)
{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_resource_manager *man;
+
dma_resv_assert_held(bo->base.resv);
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, &bo->mem);
+ if (bo->pin_count) {
+ ttm_bo_del_from_lru(bo);
+ return;
+ }
+
+ man = ttm_manager_type(bdev, mem->mem_type);
+ list_move_tail(&bo->lru, &man->lru[bo->priority]);
+ if (man->use_tt && bo->ttm &&
+ !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
+ TTM_PAGE_FLAG_SWAPPED))) {
+ struct list_head *swap;
+
+ swap = &ttm_bo_glob.swap_lru[bo->priority];
+ list_move_tail(&bo->swap, swap);
+ }
+
+ if (bdev->driver->del_from_lru_notify)
+ bdev->driver->del_from_lru_notify(bo);
if (bulk && !bo->pin_count) {
switch (bo->mem.mem_type) {
@@ -267,7 +262,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
goto out_err;
}
- ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
+ ctx->bytes_moved += bo->base.size;
return 0;
out_err:
@@ -514,10 +509,9 @@ static void ttm_bo_release(struct kref *kref)
* shrinkers, now that they are queued for
* destruction.
*/
- if (bo->pin_count) {
+ if (WARN_ON(bo->pin_count)) {
bo->pin_count = 0;
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, &bo->mem);
+ ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
}
kref_init(&bo->kref);
@@ -859,8 +853,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
mem->placement = place->flags;
spin_lock(&ttm_bo_glob.lru_lock);
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, mem);
+ ttm_bo_move_to_lru_tail(bo, mem, NULL);
spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
@@ -937,9 +930,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
error:
- if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
+ if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
ttm_bo_move_to_lru_tail_unlocked(bo);
- }
return ret;
}
@@ -984,8 +976,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
memset(&hop, 0, sizeof(hop));
- mem.num_pages = bo->num_pages;
- mem.size = mem.num_pages << PAGE_SHIFT;
+ mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
mem.bus.offset = 0;
mem.bus.addr = NULL;
@@ -1101,7 +1092,7 @@ EXPORT_SYMBOL(ttm_bo_validate);
int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo,
- unsigned long size,
+ size_t size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
@@ -1112,9 +1103,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
void (*destroy) (struct ttm_buffer_object *))
{
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
- int ret = 0;
- unsigned long num_pages;
bool locked;
+ int ret = 0;
ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
if (ret) {
@@ -1126,16 +1116,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
return -ENOMEM;
}
- num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (num_pages == 0) {
- pr_err("Illegal buffer object size\n");
- if (destroy)
- (*destroy)(bo);
- else
- kfree(bo);
- ttm_mem_global_free(mem_glob, acc_size);
- return -EINVAL;
- }
bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
kref_init(&bo->kref);
@@ -1144,10 +1124,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->swap);
bo->bdev = bdev;
bo->type = type;
- bo->num_pages = num_pages;
- bo->mem.size = num_pages << PAGE_SHIFT;
bo->mem.mem_type = TTM_PL_SYSTEM;
- bo->mem.num_pages = bo->num_pages;
+ bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
bo->mem.bus.offset = 0;
@@ -1165,9 +1143,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
}
if (!ttm_bo_uses_embedded_gem_object(bo)) {
/*
- * bo.gem is not initialized, so we have to setup the
+ * bo.base is not initialized, so we have to setup the
* struct elements we want use regardless.
*/
+ bo->base.size = size;
dma_resv_init(&bo->base._resv);
drm_vma_node_reset(&bo->base.vma_node);
}
@@ -1209,7 +1188,7 @@ EXPORT_SYMBOL(ttm_bo_init_reserved);
int ttm_bo_init(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo,
- unsigned long size,
+ size_t size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
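
With bo->num_pages and bo->mem.size gone, the object size lives only in bo->base.size and page counts are derived on demand, as in this sketch (the helper name is hypothetical):

/* Sketch: page count derived from the GEM base size instead of a cached field. */
static inline unsigned long sketch_bo_num_pages(struct ttm_buffer_object *bo)
{
	return PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
}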
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7ccb2295cac1..398d5013fc39 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -310,7 +310,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
- fbo->base.pin_count = 1;
+ fbo->base.pin_count = 0;
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
@@ -319,6 +319,8 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
+ ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
+
*new_obj = &fbo->base;
return 0;
}
@@ -429,9 +431,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
map->virtual = NULL;
map->bo = bo;
- if (num_pages > bo->num_pages)
+ if (num_pages > bo->mem.num_pages)
return -EINVAL;
- if (start_page > bo->num_pages)
+ if ((start_page + num_pages) > bo->mem.num_pages)
return -EINVAL;
ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
@@ -483,14 +485,14 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
if (mem->bus.is_iomem) {
void __iomem *vaddr_iomem;
- size_t size = bo->num_pages << PAGE_SHIFT;
if (mem->bus.addr)
vaddr_iomem = (void __iomem *)mem->bus.addr;
else if (mem->bus.caching == ttm_write_combined)
- vaddr_iomem = ioremap_wc(mem->bus.offset, size);
+ vaddr_iomem = ioremap_wc(mem->bus.offset,
+ bo->base.size);
else
- vaddr_iomem = ioremap(mem->bus.offset, size);
+ vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
if (!vaddr_iomem)
return -ENOMEM;
@@ -515,7 +517,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
* or to make the buffer object look contiguous.
*/
prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
- vaddr = vmap(ttm->pages, bo->num_pages, 0, prot);
+ vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
if (!vaddr)
return -ENOMEM;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 2944fa0af493..6dc96cf66744 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -31,7 +31,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
@@ -199,7 +198,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
/* Fault should not cross bo boundary. */
page_offset &= ~(fault_page_size - 1);
- if (page_offset + fault_page_size > bo->num_pages)
+ if (page_offset + fault_page_size > bo->mem.num_pages)
goto out_fallback;
if (bo->mem.bus.is_iomem)
@@ -307,7 +306,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
page_last = vma_pages(vma) + vma->vm_pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->num_pages))
+ if (unlikely(page_offset >= bo->mem.num_pages))
return VM_FAULT_SIGBUS;
prot = ttm_io_prot(bo, &bo->mem, prot);
@@ -470,7 +469,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
<< PAGE_SHIFT);
int ret;
- if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
+ if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages)
return -EIO;
ret = ttm_bo_reserve(bo, true, false, NULL);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 8a8f1a6a83a6..9fa36ed59429 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -55,7 +55,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&ttm_bo_glob.lru_lock);
@@ -162,7 +162,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
dma_resv_add_shared_fence(bo->base.resv, fence);
else
dma_resv_add_excl_fence(bo->base.resv, fence);
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&ttm_bo_glob.lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 5ed1fc8f2ace..a3bfbd9cea68 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -29,7 +29,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
@@ -39,6 +38,8 @@
#include <linux/swap.h>
#include <drm/ttm/ttm_pool.h>
+#include "ttm_module.h"
+
#define TTM_MEMORY_ALLOC_RETRIES 4
struct ttm_mem_global ttm_mem_glob;
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 6ff40c041d79..c0906437cb1c 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -32,9 +32,10 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sched.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/drm_sysfs.h>
+#include "ttm_module.h"
+
static DECLARE_WAIT_QUEUE_HEAD(exit_q);
static atomic_t device_released;
diff --git a/include/drm/ttm/ttm_module.h b/drivers/gpu/drm/ttm/ttm_module.h
index 45fa318c1585..45fa318c1585 100644
--- a/include/drm/ttm/ttm_module.h
+++ b/drivers/gpu/drm/ttm/ttm_module.h
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 7b2f60616750..11e0313db0ea 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -66,7 +66,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
-static spinlock_t shrinker_lock;
+static struct mutex shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;
@@ -79,12 +79,13 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
struct page *p;
void *vaddr;
- if (order) {
- gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+ /* Don't set the __GFP_COMP flag for higher order allocations.
+ * Mapping pages directly into a userspace process and calling
+ * put_page() on a TTM allocated page is illegal.
+ */
+ if (order)
+ gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY |
__GFP_KSWAPD_RECLAIM;
- gfp_flags &= ~__GFP_MOVABLE;
- gfp_flags &= ~__GFP_COMP;
- }
if (!pool->use_dma_alloc) {
p = alloc_pages(gfp_flags, order);
@@ -190,7 +191,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
size_t size = (1ULL << order) * PAGE_SIZE;
addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(pool->dev, **dma_addr))
+ if (dma_mapping_error(pool->dev, addr))
return -EFAULT;
}
@@ -249,9 +250,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
spin_lock_init(&pt->lock);
INIT_LIST_HEAD(&pt->pages);
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
list_add_tail(&pt->shrinker_list, &shrinker_list);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
}
/* Remove a pool_type from the global shrinker list and free all pages */
@@ -259,9 +260,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
struct page *p, *tmp;
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
list_del(&pt->shrinker_list);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
list_for_each_entry_safe(p, tmp, &pt->pages, lru)
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
@@ -302,7 +303,7 @@ static unsigned int ttm_pool_shrink(void)
unsigned int num_freed;
struct page *p;
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
p = ttm_pool_type_take(pt);
@@ -314,7 +315,7 @@ static unsigned int ttm_pool_shrink(void)
}
list_move_tail(&pt->shrinker_list, &shrinker_list);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
return num_freed;
}
@@ -507,7 +508,6 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
}
-EXPORT_SYMBOL(ttm_pool_init);
/**
* ttm_pool_fini - Cleanup a pool
@@ -525,7 +525,6 @@ void ttm_pool_fini(struct ttm_pool *pool)
for (j = 0; j < MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
-EXPORT_SYMBOL(ttm_pool_fini);
#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
@@ -566,7 +565,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
unsigned int i;
- spin_lock(&shrinker_lock);
+ mutex_lock(&shrinker_lock);
seq_puts(m, "\t ");
for (i = 0; i < MAX_ORDER; ++i)
@@ -602,7 +601,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
atomic_long_read(&allocated_pages), page_pool_size);
- spin_unlock(&shrinker_lock);
+ mutex_unlock(&shrinker_lock);
return 0;
}
@@ -646,7 +645,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
if (!page_pool_size)
page_pool_size = num_pages;
- spin_lock_init(&shrinker_lock);
+ mutex_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
for (i = 0; i < MAX_ORDER; ++i) {
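
One of the hunks above also fixes ttm_pool_map() to validate the address actually returned by dma_map_page() instead of dereferencing the caller's cursor. A minimal sketch of the corrected order (simplified; not the full function):

/* Sketch: map first, then test the returned address. */
dma_addr_t addr;

addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
if (dma_mapping_error(pool->dev, addr))
	return -EFAULT;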
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index e0952444cea9..a39305f742da 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -29,7 +29,6 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_mm.h>
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index da9eeffe0c6d..7f75a13163f0 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -129,7 +129,7 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
uint32_t page_flags,
enum ttm_caching caching)
{
- ttm->num_pages = bo->num_pages;
+ ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
ttm->caching = ttm_cached;
ttm->page_flags = page_flags;
ttm->dma_address = NULL;
@@ -162,19 +162,6 @@ void ttm_tt_fini(struct ttm_tt *ttm)
}
EXPORT_SYMBOL(ttm_tt_fini);
-int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags, enum ttm_caching caching)
-{
- ttm_tt_init_fields(ttm, bo, page_flags, caching);
-
- if (ttm_dma_tt_alloc_page_directory(ttm)) {
- pr_err("Failed allocating page table\n");
- return -ENOMEM;
- }
- return 0;
-}
-EXPORT_SYMBOL(ttm_dma_tt_init);
-
int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
uint32_t page_flags, enum ttm_caching caching)
{
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index 17ff24d999d1..cb0e837d3dba 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -11,7 +11,6 @@
*/
#include <linux/clk.h>
-#include <linux/version.h>
#include <linux/dma-buf.h>
#include <linux/of_graph.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 07140e0b90a3..7fa71c8bb828 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -35,7 +35,6 @@
#include <linux/platform_device.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 42d401fd244e..99e22beea90b 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -232,8 +232,8 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
return ret;
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
- dev->coherent_dma_mask =
- DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
+ dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)));
v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH);
ident1 = V3D_READ(V3D_HUB_IDENT1);
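
The hunk above replaces a direct assignment of coherent_dma_mask with the dma_set_mask_and_coherent() helper. A hedged sketch with the usual return-value check added (the check itself is not part of this patch):

/* Sketch only: checking the helper's result, which this hunk omits. */
ret = dma_set_mask_and_coherent(dev,
		DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)));
if (ret)
	return ret;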
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index c88686489b88..e714d5318f30 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -178,10 +178,7 @@ v3d_hub_irq(int irq, void *arg)
};
const char *client = "?";
- V3D_WRITE(V3D_MMU_CTL,
- V3D_READ(V3D_MMU_CTL) & (V3D_MMU_CTL_CAP_EXCEEDED |
- V3D_MMU_CTL_PT_INVALID |
- V3D_MMU_CTL_WRITE_VIOLATION));
+ V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));
if (v3d->ver >= 41) {
axi_id = axi_id >> 5;
@@ -217,7 +214,7 @@ v3d_irq_init(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
- irq1 = platform_get_irq(v3d_to_pdev(v3d), 1);
+ irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
if (irq1 == -EPROBE_DEFER)
return irq1;
if (irq1 > 0) {
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index f3eac72cb46e..e534896b6cfd 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -51,7 +51,6 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(vbox))
return PTR_ERR(vbox);
- vbox->ddev.pdev = pdev;
pci_set_drvdata(pdev, vbox);
mutex_init(&vbox->hw_mutex);
@@ -109,15 +108,16 @@ static void vbox_pci_remove(struct pci_dev *pdev)
static int vbox_pm_suspend(struct device *dev)
{
struct vbox_private *vbox = dev_get_drvdata(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
int error;
error = drm_mode_config_helper_suspend(&vbox->ddev);
if (error)
return error;
- pci_save_state(vbox->ddev.pdev);
- pci_disable_device(vbox->ddev.pdev);
- pci_set_power_state(vbox->ddev.pdev, PCI_D3hot);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
@@ -125,8 +125,9 @@ static int vbox_pm_suspend(struct device *dev)
static int vbox_pm_resume(struct device *dev)
{
struct vbox_private *vbox = dev_get_drvdata(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
- if (pci_enable_device(vbox->ddev.pdev))
+ if (pci_enable_device(pdev))
return -EIO;
return drm_mode_config_helper_resume(&vbox->ddev);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c
index 631657fa554f..b3ded68603ba 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_irq.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c
@@ -170,10 +170,12 @@ static void vbox_hotplug_worker(struct work_struct *work)
int vbox_irq_init(struct vbox_private *vbox)
{
+ struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev);
+
INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker);
vbox_update_mode_hints(vbox);
- return drm_irq_install(&vbox->ddev, vbox->ddev.pdev->irq);
+ return drm_irq_install(&vbox->ddev, pdev->irq);
}
void vbox_irq_fini(struct vbox_private *vbox)
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index d68d9bad7674..f28779715ccd 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -8,7 +8,9 @@
* Hans de Goede <[email protected]>
*/
+#include <linux/pci.h>
#include <linux/vbox_err.h>
+
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
@@ -30,6 +32,7 @@ void vbox_report_caps(struct vbox_private *vbox)
static int vbox_accel_init(struct vbox_private *vbox)
{
+ struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev);
struct vbva_buffer *vbva;
unsigned int i;
@@ -41,7 +44,7 @@ static int vbox_accel_init(struct vbox_private *vbox)
/* Take a command buffer for each screen from the end of usable VRAM. */
vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
- vbox->vbva_buffers = pci_iomap_range(vbox->ddev.pdev, 0,
+ vbox->vbva_buffers = pci_iomap_range(pdev, 0,
vbox->available_vram_size,
vbox->num_crtcs *
VBVA_MIN_BUFFER_SIZE);
@@ -106,6 +109,7 @@ bool vbox_check_supported(u16 id)
int vbox_hw_init(struct vbox_private *vbox)
{
+ struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev);
int ret = -ENOMEM;
vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
@@ -115,7 +119,7 @@ int vbox_hw_init(struct vbox_private *vbox)
/* Map guest-heap at end of vram */
vbox->guest_heap =
- pci_iomap_range(vbox->ddev.pdev, 0, GUEST_HEAP_OFFSET(vbox),
+ pci_iomap_range(pdev, 0, GUEST_HEAP_OFFSET(vbox),
GUEST_HEAP_SIZE);
if (!vbox->guest_heap)
return -ENOMEM;
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index f5a06675da43..0066a3c1dfc9 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -15,8 +15,9 @@ int vbox_mm_init(struct vbox_private *vbox)
struct drm_vram_mm *vmm;
int ret;
struct drm_device *dev = &vbox->ddev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
- vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
+ vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(pdev, 0),
vbox->available_vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
@@ -24,8 +25,8 @@ int vbox_mm_init(struct vbox_private *vbox)
return ret;
}
- vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
+ vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
return 0;
}
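
The vboxvideo hunks above all follow the same recipe for the drm_device::pdev removal: recover the PCI device from the generic device backing the DRM device. Sketch (the wrapper name is hypothetical):

/* Sketch of the lookup used throughout these hunks. */
static struct pci_dev *sketch_vbox_pdev(struct vbox_private *vbox)
{
	return to_pci_dev(vbox->ddev.dev);
}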
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 469d1b4f2643..fddaeb0b09c1 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -21,7 +21,7 @@
#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"
-static vm_fault_t vc4_fault(struct vm_fault *vmf);
+static const struct drm_gem_object_funcs vc4_gem_object_funcs;
static const char * const bo_type_names[] = {
"kernel",
@@ -376,20 +376,6 @@ out:
return bo;
}
-static const struct vm_operations_struct vc4_vm_ops = {
- .fault = vc4_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
-static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
- .free = vc4_free_object,
- .export = vc4_prime_export,
- .get_sg_table = drm_gem_cma_prime_get_sg_table,
- .vmap = vc4_prime_vmap,
- .vm_ops = &vc4_vm_ops,
-};
-
/**
* vc4_create_object - Implementation of driver->gem_create_object.
* @dev: DRM device
@@ -538,7 +524,7 @@ static void vc4_bo_cache_free_old(struct drm_device *dev)
/* Called on the last userspace/kernel unreference of the BO. Returns
* it to the BO cache if possible, otherwise frees it.
*/
-void vc4_free_object(struct drm_gem_object *gem_bo)
+static void vc4_free_object(struct drm_gem_object *gem_bo)
{
struct drm_device *dev = gem_bo->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -673,7 +659,7 @@ static void vc4_bo_cache_time_timer(struct timer_list *t)
schedule_work(&vc4->bo_cache.time_work);
}
-struct dma_buf * vc4_prime_export(struct drm_gem_object *obj, int flags)
+static struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
{
struct vc4_bo *bo = to_vc4_bo(obj);
struct dma_buf *dmabuf;
@@ -718,19 +704,9 @@ static vm_fault_t vc4_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
-int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
+static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
- struct drm_gem_object *gem_obj;
- unsigned long vm_pgoff;
- struct vc4_bo *bo;
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- gem_obj = vma->vm_private_data;
- bo = to_vc4_bo(gem_obj);
+ struct vc4_bo *bo = to_vc4_bo(obj);
if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
@@ -744,72 +720,23 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
}
- /*
- * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
- * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
- * the whole buffer.
- */
- vma->vm_flags &= ~VM_PFNMAP;
-
- /* This ->vm_pgoff dance is needed to make all parties happy:
- * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
- * mem-region, hence the need to set it to zero (the value set by
- * the DRM core is a virtual offset encoding the GEM object-id)
- * - the mmap() core logic needs ->vm_pgoff to be restored to its
- * initial value before returning from this function because it
- * encodes the offset of this GEM in the dev->anon_inode pseudo-file
- * and this information will be used when we invalidate userspace
- * mappings with drm_vma_node_unmap() (called from vc4_gem_purge()).
- */
- vm_pgoff = vma->vm_pgoff;
- vma->vm_pgoff = 0;
- ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
- bo->base.paddr, vma->vm_end - vma->vm_start);
- vma->vm_pgoff = vm_pgoff;
-
- if (ret)
- drm_gem_vm_close(vma);
-
- return ret;
-}
-
-int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- struct vc4_bo *bo = to_vc4_bo(obj);
-
- if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
- DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
- return -EINVAL;
- }
-
- return drm_gem_cma_prime_mmap(obj, vma);
-}
-
-int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
-{
- struct vc4_bo *bo = to_vc4_bo(obj);
-
- if (bo->validated_shader) {
- DRM_DEBUG("mmaping of shader BOs not allowed.\n");
- return -EINVAL;
- }
-
- return drm_gem_cma_prime_vmap(obj, map);
+ return drm_gem_cma_mmap(obj, vma);
}
-struct drm_gem_object *
-vc4_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt)
-{
- struct drm_gem_object *obj;
-
- obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
- if (IS_ERR(obj))
- return obj;
+static const struct vm_operations_struct vc4_vm_ops = {
+ .fault = vc4_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
- return obj;
-}
+static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
+ .free = vc4_free_object,
+ .export = vc4_prime_export,
+ .get_sg_table = drm_gem_cma_get_sg_table,
+ .vmap = drm_gem_cma_vmap,
+ .mmap = vc4_gem_object_mmap,
+ .vm_ops = &vc4_vm_ops,
+};
static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
{
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index ea710beb8e00..269390bc586e 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -403,7 +403,9 @@ static void require_hvs_enabled(struct drm_device *dev)
SCALER_DISPCTRL_ENABLE);
}
-static int vc4_crtc_disable(struct drm_crtc *crtc, unsigned int channel)
+static int vc4_crtc_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state,
+ unsigned int channel)
{
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
@@ -435,13 +437,13 @@ static int vc4_crtc_disable(struct drm_crtc *crtc, unsigned int channel)
mdelay(20);
if (vc4_encoder && vc4_encoder->post_crtc_disable)
- vc4_encoder->post_crtc_disable(encoder);
+ vc4_encoder->post_crtc_disable(encoder, state);
vc4_crtc_pixelvalve_reset(crtc);
vc4_hvs_stop_channel(dev, channel);
if (vc4_encoder && vc4_encoder->post_crtc_powerdown)
- vc4_encoder->post_crtc_powerdown(encoder);
+ vc4_encoder->post_crtc_powerdown(encoder, state);
return 0;
}
@@ -468,7 +470,7 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
if (channel < 0)
return 0;
- return vc4_crtc_disable(crtc, channel);
+ return vc4_crtc_disable(crtc, NULL, channel);
}
static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -484,7 +486,7 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
/* Disable vblank irq handling before crtc is disabled. */
drm_crtc_vblank_off(crtc);
- vc4_crtc_disable(crtc, old_vc4_state->assigned_channel);
+ vc4_crtc_disable(crtc, state, old_vc4_state->assigned_channel);
/*
* Make sure we issue a vblank event after disabling the CRTC if
@@ -503,8 +505,6 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
- crtc);
struct drm_device *dev = crtc->dev;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
@@ -517,17 +517,17 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
*/
drm_crtc_vblank_on(crtc);
- vc4_hvs_atomic_enable(crtc, old_state);
+ vc4_hvs_atomic_enable(crtc, state);
if (vc4_encoder->pre_crtc_configure)
- vc4_encoder->pre_crtc_configure(encoder);
+ vc4_encoder->pre_crtc_configure(encoder, state);
vc4_crtc_config_pv(crtc);
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN);
if (vc4_encoder->pre_crtc_enable)
- vc4_encoder->pre_crtc_enable(encoder);
+ vc4_encoder->pre_crtc_enable(encoder, state);
/* When feeding the transposer block the pixelvalve is unneeded and
* should not be enabled.
@@ -536,7 +536,7 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
if (vc4_encoder->post_crtc_enable)
- vc4_encoder->post_crtc_enable(encoder);
+ vc4_encoder->post_crtc_enable(encoder, state);
}
static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
@@ -593,7 +593,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_connector_state *conn_state;
int ret, i;
- ret = vc4_hvs_atomic_check(crtc, crtc_state);
+ ret = vc4_hvs_atomic_check(crtc, state);
if (ret)
return ret;
@@ -697,7 +697,6 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
container_of(cb, struct vc4_async_flip_state, cb);
struct drm_crtc *crtc = flip_state->crtc;
struct drm_device *dev = crtc->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane = crtc->primary;
vc4_plane_async_set_fb(plane, flip_state->fb);
@@ -729,8 +728,6 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
}
kfree(flip_state);
-
- up(&vc4->async_modeset);
}
/* Implements async (non-vblank-synced) page flips.
@@ -745,7 +742,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
uint32_t flags)
{
struct drm_device *dev = crtc->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane = crtc->primary;
int ret = 0;
struct vc4_async_flip_state *flip_state;
@@ -774,15 +770,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
flip_state->crtc = crtc;
flip_state->event = event;
- /* Make sure all other async modesetes have landed. */
- ret = down_interruptible(&vc4->async_modeset);
- if (ret) {
- drm_framebuffer_put(fb);
- vc4_bo_dec_usecnt(bo);
- kfree(flip_state);
- return ret;
- }
-
/* Save the current FB before it's replaced by the new one in
* drm_atomic_set_fb_for_plane(). We'll need the old FB in
* vc4_async_page_flip_complete() to decrement the BO usecnt and keep
@@ -884,7 +871,6 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
.reset = vc4_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
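
The disable path above also has to work from vc4_crtc_disable_at_boot(), where no atomic commit exists, hence the NULL state passed there; the encoder hooks that run on that path ignore the state argument. For hooks that do need per-CRTC state, the converted signature means looking it up from the drm_atomic_state instead of receiving it directly. A minimal illustration (example_hook is a hypothetical name, not from the patch):

static void example_hook(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
	/* Old and new CRTC states are both reachable from the top-level
	 * atomic state, so callees no longer need a drm_crtc_state argument.
	 */
	struct drm_crtc_state *old_crtc_state =
		drm_atomic_get_old_crtc_state(state, crtc);
	struct drm_crtc_state *new_crtc_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	/* ... use old_crtc_state / new_crtc_state as required ... */
}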
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 2cd97a39c286..556ad0f02a0d 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -140,17 +140,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
kfree(vc4file);
}
-static const struct file_operations vc4_drm_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = vc4_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(vc4_drm_fops);
static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
@@ -190,12 +180,7 @@ static struct drm_driver vc4_drm_driver = {
.gem_create_object = vc4_create_object,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import_sg_table = vc4_prime_import_sg_table,
- .gem_prime_mmap = vc4_prime_mmap,
-
- .dumb_create = vc4_dumb_create,
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_dumb_create),
.ioctls = vc4_drm_ioctls,
.num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
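
The two helper macros stand in for the open-coded fops and PRIME callbacks removed above: DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() fills in the PRIME handle/fd conversion and scatter-gather import callbacks with the CMA defaults plus the supplied dumb_create, and DEFINE_DRM_GEM_FOPS() declares the standard GEM file_operations. An approximate expansion of the latter (the authoritative definition lives in include/drm/drm_gem.h):

/* DEFINE_DRM_GEM_FOPS(vc4_drm_fops) expands to roughly: */
static const struct file_operations vc4_drm_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,	/* dispatches to obj->funcs->mmap */
};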
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 43a1af110b3e..a7500716cf3f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -77,7 +77,6 @@ struct vc4_dev {
struct vc4_hvs *hvs;
struct vc4_v3d *v3d;
struct vc4_dpi *dpi;
- struct vc4_dsi *dsi1;
struct vc4_vec *vec;
struct vc4_txp *txp;
@@ -215,8 +214,6 @@ struct vc4_dev {
struct work_struct reset_work;
} hangcheck;
- struct semaphore async_modeset;
-
struct drm_modeset_lock ctm_state_lock;
struct drm_private_obj ctm_manager;
struct drm_private_obj hvs_channels;
@@ -444,12 +441,12 @@ struct vc4_encoder {
enum vc4_encoder_type type;
u32 clock_select;
- void (*pre_crtc_configure)(struct drm_encoder *encoder);
- void (*pre_crtc_enable)(struct drm_encoder *encoder);
- void (*post_crtc_enable)(struct drm_encoder *encoder);
+ void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state);
+ void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
+ void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
- void (*post_crtc_disable)(struct drm_encoder *encoder);
- void (*post_crtc_powerdown)(struct drm_encoder *encoder);
+ void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
+ void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state);
};
static inline struct vc4_encoder *
@@ -785,13 +782,11 @@ struct vc4_validated_shader_info {
/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
-void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
@@ -806,12 +801,6 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
-int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
int vc4_bo_cache_init(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
@@ -916,11 +905,10 @@ void vc4_irq_reset(struct drm_device *dev);
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output);
-int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
-void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
-void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
-void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state);
+int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_dump_state(struct drm_device *dev);
void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 19aab4e7e209..a55256ed0955 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -306,11 +306,11 @@
# define DSI0_PHY_AFEC0_RESET BIT(11)
# define DSI1_PHY_AFEC0_PD_BG BIT(11)
# define DSI0_PHY_AFEC0_PD BIT(10)
-# define DSI1_PHY_AFEC0_PD_DLANE3 BIT(10)
+# define DSI1_PHY_AFEC0_PD_DLANE1 BIT(10)
# define DSI0_PHY_AFEC0_PD_BG BIT(9)
# define DSI1_PHY_AFEC0_PD_DLANE2 BIT(9)
# define DSI0_PHY_AFEC0_PD_DLANE1 BIT(8)
-# define DSI1_PHY_AFEC0_PD_DLANE1 BIT(8)
+# define DSI1_PHY_AFEC0_PD_DLANE3 BIT(8)
# define DSI_PHY_AFEC0_PTATADJ_MASK VC4_MASK(7, 4)
# define DSI_PHY_AFEC0_PTATADJ_SHIFT 4
# define DSI_PHY_AFEC0_CTATADJ_MASK VC4_MASK(3, 0)
@@ -493,6 +493,18 @@
*/
#define DSI1_ID 0x8c
+struct vc4_dsi_variant {
+ /* Whether we're on bcm2835's DSI0 or DSI1. */
+ unsigned int port;
+
+ bool broken_axi_workaround;
+
+ const char *debugfs_name;
+ const struct debugfs_reg32 *regs;
+ size_t nregs;
+
+};
+
/* General DSI hardware state. */
struct vc4_dsi {
struct platform_device *pdev;
@@ -509,8 +521,7 @@ struct vc4_dsi {
u32 *reg_dma_mem;
dma_addr_t reg_paddr;
- /* Whether we're on bcm2835's DSI0 or DSI1. */
- int port;
+ const struct vc4_dsi_variant *variant;
/* DSI channel for the panel we're connected to. */
u32 channel;
@@ -586,10 +597,10 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
#define DSI_READ(offset) readl(dsi->regs + (offset))
#define DSI_WRITE(offset, val) dsi_dma_workaround_write(dsi, offset, val)
#define DSI_PORT_READ(offset) \
- DSI_READ(dsi->port ? DSI1_##offset : DSI0_##offset)
+ DSI_READ(dsi->variant->port ? DSI1_##offset : DSI0_##offset)
#define DSI_PORT_WRITE(offset, val) \
- DSI_WRITE(dsi->port ? DSI1_##offset : DSI0_##offset, val)
-#define DSI_PORT_BIT(bit) (dsi->port ? DSI1_##bit : DSI0_##bit)
+ DSI_WRITE(dsi->variant->port ? DSI1_##offset : DSI0_##offset, val)
+#define DSI_PORT_BIT(bit) (dsi->variant->port ? DSI1_##bit : DSI0_##bit)
/* VC4 DSI encoder KMS struct */
struct vc4_dsi_encoder {
@@ -837,7 +848,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
ret = pm_runtime_get_sync(dev);
if (ret) {
- DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port);
+ DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
return;
}
@@ -871,7 +882,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_PORT_WRITE(STAT, DSI_PORT_READ(STAT));
/* Set AFE CTR00/CTR1 to release powerdown of analog. */
- if (dsi->port == 0) {
+ if (dsi->variant->port == 0) {
u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) |
VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ));
@@ -1017,7 +1028,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_PORT_BIT(PHYC_CLANE_ENABLE) |
((dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ?
0 : DSI_PORT_BIT(PHYC_HS_CLK_CONTINUOUS)) |
- (dsi->port == 0 ?
+ (dsi->variant->port == 0 ?
VC4_SET_FIELD(lpx - 1, DSI0_PHYC_ESC_CLK_LPDT) :
VC4_SET_FIELD(lpx - 1, DSI1_PHYC_ESC_CLK_LPDT)));
@@ -1043,13 +1054,13 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_DISP1_ENABLE);
/* Ungate the block. */
- if (dsi->port == 0)
+ if (dsi->variant->port == 0)
DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI0_CTRL_CTRL0);
else
DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI1_CTRL_EN);
/* Bring AFE out of reset. */
- if (dsi->port == 0) {
+ if (dsi->variant->port == 0) {
} else {
DSI_PORT_WRITE(PHY_AFEC0,
DSI_PORT_READ(PHY_AFEC0) &
@@ -1313,8 +1324,32 @@ static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = {
.mode_fixup = vc4_dsi_encoder_mode_fixup,
};
+static const struct vc4_dsi_variant bcm2711_dsi1_variant = {
+ .port = 1,
+ .debugfs_name = "dsi1_regs",
+ .regs = dsi1_regs,
+ .nregs = ARRAY_SIZE(dsi1_regs),
+};
+
+static const struct vc4_dsi_variant bcm2835_dsi0_variant = {
+ .port = 0,
+ .debugfs_name = "dsi0_regs",
+ .regs = dsi0_regs,
+ .nregs = ARRAY_SIZE(dsi0_regs),
+};
+
+static const struct vc4_dsi_variant bcm2835_dsi1_variant = {
+ .port = 1,
+ .broken_axi_workaround = true,
+ .debugfs_name = "dsi1_regs",
+ .regs = dsi1_regs,
+ .nregs = ARRAY_SIZE(dsi1_regs),
+};
+
static const struct of_device_id vc4_dsi_dt_match[] = {
- { .compatible = "brcm,bcm2835-dsi1", (void *)(uintptr_t)1 },
+ { .compatible = "brcm,bcm2711-dsi1", &bcm2711_dsi1_variant },
+ { .compatible = "brcm,bcm2835-dsi0", &bcm2835_dsi0_variant },
+ { .compatible = "brcm,bcm2835-dsi1", &bcm2835_dsi1_variant },
{}
};
@@ -1325,7 +1360,7 @@ static void dsi_handle_error(struct vc4_dsi *dsi,
if (!(stat & bit))
return;
- DRM_ERROR("DSI%d: %s error\n", dsi->port, type);
+ DRM_ERROR("DSI%d: %s error\n", dsi->variant->port, type);
*ret = IRQ_HANDLED;
}
@@ -1398,12 +1433,12 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
struct device *dev = &dsi->pdev->dev;
const char *parent_name = __clk_get_name(dsi->pll_phy_clock);
static const struct {
- const char *dsi0_name, *dsi1_name;
+ const char *name;
int div;
} phy_clocks[] = {
- { "dsi0_byte", "dsi1_byte", 8 },
- { "dsi0_ddr2", "dsi1_ddr2", 4 },
- { "dsi0_ddr", "dsi1_ddr", 2 },
+ { "byte", 8 },
+ { "ddr2", 4 },
+ { "ddr", 2 },
};
int i;
@@ -1419,8 +1454,12 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
for (i = 0; i < ARRAY_SIZE(phy_clocks); i++) {
struct clk_fixed_factor *fix = &dsi->phy_clocks[i];
struct clk_init_data init;
+ char clk_name[16];
int ret;
+ snprintf(clk_name, sizeof(clk_name),
+ "dsi%u_%s", dsi->variant->port, phy_clocks[i].name);
+
/* We just use core fixed factor clock ops for the PHY
* clocks. The clocks are actually gated by the
* PHY_AFEC0_DDRCLK_EN bits, which we should be
@@ -1437,10 +1476,7 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
memset(&init, 0, sizeof(init));
init.parent_names = &parent_name;
init.num_parents = 1;
- if (dsi->port == 1)
- init.name = phy_clocks[i].dsi1_name;
- else
- init.name = phy_clocks[i].dsi0_name;
+ init.name = clk_name;
init.ops = &clk_fixed_factor_ops;
ret = devm_clk_hw_register(dev, &fix->hw);
@@ -1459,7 +1495,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
struct vc4_dsi_encoder *vc4_dsi_encoder;
struct drm_panel *panel;
@@ -1471,7 +1506,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (!match)
return -ENODEV;
- dsi->port = (uintptr_t)match->data;
+ dsi->variant = match->data;
vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder),
GFP_KERNEL);
@@ -1488,13 +1523,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(dsi->regs);
dsi->regset.base = dsi->regs;
- if (dsi->port == 0) {
- dsi->regset.regs = dsi0_regs;
- dsi->regset.nregs = ARRAY_SIZE(dsi0_regs);
- } else {
- dsi->regset.regs = dsi1_regs;
- dsi->regset.nregs = ARRAY_SIZE(dsi1_regs);
- }
+ dsi->regset.regs = dsi->variant->regs;
+ dsi->regset.nregs = dsi->variant->nregs;
if (DSI_PORT_READ(ID) != DSI_ID_VALUE) {
dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
@@ -1502,11 +1532,11 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
}
- /* DSI1 has a broken AXI slave that doesn't respond to writes
- * from the ARM. It does handle writes from the DMA engine,
+ /* DSI1 on BCM2835/6/7 has a broken AXI slave that doesn't respond to
+ * writes from the ARM. It does handle writes from the DMA engine,
* so set up a channel for talking to it.
*/
- if (dsi->port == 1) {
+ if (dsi->variant->broken_axi_workaround) {
dsi->reg_dma_mem = dma_alloc_coherent(dev, 4,
&dsi->reg_dma_paddr,
GFP_KERNEL);
@@ -1612,9 +1642,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- if (dsi->port == 1)
- vc4->dsi1 = dsi;
-
drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI);
drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
@@ -1630,10 +1657,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
*/
list_splice_init(&dsi->encoder->bridge_chain, &dsi->bridge_chain);
- if (dsi->port == 0)
- vc4_debugfs_add_regset32(drm, "dsi0_regs", &dsi->regset);
- else
- vc4_debugfs_add_regset32(drm, "dsi1_regs", &dsi->regset);
+ vc4_debugfs_add_regset32(drm, dsi->variant->debugfs_name, &dsi->regset);
pm_runtime_enable(dev);
@@ -1643,8 +1667,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
static void vc4_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
if (dsi->bridge)
@@ -1656,9 +1678,6 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
*/
list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
drm_encoder_cleanup(dsi->encoder);
-
- if (dsi->port == 1)
- vc4->dsi1 = NULL;
}
static const struct component_ops vc4_dsi_ops = {
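
The vc4_dsi.c changes above replace the bare port number stashed in of_device_id.data with a vc4_dsi_variant describing each controller. The driver resolves it through of_match_device(); the same lookup can be expressed with the of_device_get_match_data() helper, shown here only to illustrate how the per-compatible .data pointer is consumed (example_dsi_probe_variant is a hypothetical name):

static int example_dsi_probe_variant(struct device *dev, struct vc4_dsi *dsi)
{
	const struct vc4_dsi_variant *variant = of_device_get_match_data(dev);

	if (!variant)
		return -ENODEV;

	/* Register layout, debugfs name and quirks all come from the variant. */
	dsi->variant = variant;
	dsi->regset.regs = variant->regs;
	dsi->regset.nregs = variant->nregs;

	return 0;
}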
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 555106220578..bd7514612e95 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -76,12 +76,25 @@
#define VC5_HDMI_VERTB_VSPO_SHIFT 16
#define VC5_HDMI_VERTB_VSPO_MASK VC4_MASK(29, 16)
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_SHIFT 8
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK VC4_MASK(10, 8)
+
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_SHIFT 0
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_MASK VC4_MASK(3, 0)
+
+#define VC5_HDMI_GCP_CONFIG_GCP_ENABLE BIT(31)
+
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_SHIFT 8
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK VC4_MASK(15, 8)
+
# define VC4_HD_M_SW_RST BIT(2)
# define VC4_HD_M_ENABLE BIT(0)
#define CEC_CLOCK_FREQ 40000
#define VC4_HSM_MID_CLOCK 149985000
+#define HDMI_14_MAX_TMDS_CLK (340 * 1000 * 1000)
+
static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
@@ -170,16 +183,48 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
static void vc4_hdmi_connector_reset(struct drm_connector *connector)
{
- drm_atomic_helper_connector_reset(connector);
+ struct vc4_hdmi_connector_state *old_state =
+ conn_state_to_vc4_hdmi_conn_state(connector->state);
+ struct vc4_hdmi_connector_state *new_state =
+ kzalloc(sizeof(*new_state), GFP_KERNEL);
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
+ kfree(old_state);
+ __drm_atomic_helper_connector_reset(connector, &new_state->base);
+
+ if (!new_state)
+ return;
+
+ new_state->base.max_bpc = 8;
+ new_state->base.max_requested_bpc = 8;
drm_atomic_helper_connector_tv_reset(connector);
}
+static struct drm_connector_state *
+vc4_hdmi_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct drm_connector_state *conn_state = connector->state;
+ struct vc4_hdmi_connector_state *vc4_state = conn_state_to_vc4_hdmi_conn_state(conn_state);
+ struct vc4_hdmi_connector_state *new_state;
+
+ new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+ if (!new_state)
+ return NULL;
+
+ new_state->pixel_rate = vc4_state->pixel_rate;
+ __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
+
+ return &new_state->base;
+}
+
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
.detect = vc4_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = vc4_hdmi_connector_destroy,
.reset = vc4_hdmi_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_duplicate_state = vc4_hdmi_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -200,12 +245,20 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
vc4_hdmi->ddc);
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
+ /*
+ * Some of the properties below require access to state, like bpc.
+ * Allocate some default initial connector state with our reset helper.
+ */
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+
/* Create and attach TV margin props to this connector. */
ret = drm_mode_create_tv_margin_properties(dev);
if (ret)
return ret;
drm_connector_attach_tv_margin_properties(connector);
+ drm_connector_attach_max_bpc_property(connector, 8, 12);
connector->polled = (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT);
@@ -219,7 +272,8 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
}
static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
- enum hdmi_infoframe_type type)
+ enum hdmi_infoframe_type type,
+ bool poll)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
u32 packet_id = type - 0x80;
@@ -227,6 +281,9 @@ static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id));
+ if (!poll)
+ return 0;
+
return wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) &
BIT(packet_id)), 100);
}
@@ -253,7 +310,7 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
if (len < 0)
return;
- ret = vc4_hdmi_stop_packet(encoder, frame->any.type);
+ ret = vc4_hdmi_stop_packet(encoder, frame->any.type, true);
if (ret) {
DRM_ERROR("Failed to wait for infoframe to go idle: %d\n", ret);
return;
@@ -356,7 +413,8 @@ static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
vc4_hdmi_set_audio_infoframe(encoder);
}
-static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
@@ -369,7 +427,8 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder)
HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX);
}
-static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
int ret;
@@ -468,6 +527,7 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable)
}
static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *state,
struct drm_display_mode *mode)
{
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -511,7 +571,9 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_VERTB0, vertb_even);
HDMI_WRITE(HDMI_VERTB1, vertb);
}
+
static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *state,
struct drm_display_mode *mode)
{
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -531,6 +593,9 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
mode->crtc_vsync_end -
interlaced,
VC4_HDMI_VERTB_VBP));
+ unsigned char gcp;
+ bool gcp_en;
+ u32 reg;
HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021);
HDMI_WRITE(HDMI_HORZA,
@@ -556,6 +621,39 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_VERTB0, vertb_even);
HDMI_WRITE(HDMI_VERTB1, vertb);
+ switch (state->max_bpc) {
+ case 12:
+ gcp = 6;
+ gcp_en = true;
+ break;
+ case 10:
+ gcp = 5;
+ gcp_en = true;
+ break;
+ case 8:
+ default:
+ gcp = 4;
+ gcp_en = false;
+ break;
+ }
+
+ reg = HDMI_READ(HDMI_DEEP_COLOR_CONFIG_1);
+ reg &= ~(VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK |
+ VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_MASK);
+ reg |= VC4_SET_FIELD(2, VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE) |
+ VC4_SET_FIELD(gcp, VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH);
+ HDMI_WRITE(HDMI_DEEP_COLOR_CONFIG_1, reg);
+
+ reg = HDMI_READ(HDMI_GCP_WORD_1);
+ reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK;
+ reg |= VC4_SET_FIELD(gcp, VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1);
+ HDMI_WRITE(HDMI_GCP_WORD_1, reg);
+
+ reg = HDMI_READ(HDMI_GCP_CONFIG);
+ reg &= ~VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
+ reg |= gcp_en ? VC5_HDMI_GCP_CONFIG_GCP_ENABLE : 0;
+ HDMI_WRITE(HDMI_GCP_CONFIG, reg);
+
HDMI_WRITE(HDMI_CLOCK_STOP, 0);
}
@@ -583,8 +681,29 @@ static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
"VC4_HDMI_FIFO_CTL_RECENTER_DONE");
}
-static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
+static struct drm_connector_state *
+vc4_hdmi_encoder_get_connector_state(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ unsigned int i;
+
+ for_each_new_connector_in_state(state, connector, conn_state, i) {
+ if (conn_state->best_encoder == encoder)
+ return conn_state;
+ }
+
+ return NULL;
+}
+
+static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *conn_state =
+ vc4_hdmi_encoder_get_connector_state(encoder, state);
+ struct vc4_hdmi_connector_state *vc4_conn_state =
+ conn_state_to_vc4_hdmi_conn_state(conn_state);
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
unsigned long pixel_rate, hsm_rate;
@@ -596,7 +715,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
return;
}
- pixel_rate = mode->clock * 1000 * ((mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1);
+ pixel_rate = vc4_conn_state->pixel_rate;
ret = clk_set_rate(vc4_hdmi->pixel_clock, pixel_rate);
if (ret) {
DRM_ERROR("Failed to set pixel clock rate: %d\n", ret);
@@ -664,7 +783,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
vc4_hdmi->variant->reset(vc4_hdmi);
if (vc4_hdmi->variant->phy_init)
- vc4_hdmi->variant->phy_init(vc4_hdmi, mode);
+ vc4_hdmi->variant->phy_init(vc4_hdmi, vc4_conn_state);
HDMI_WRITE(HDMI_SCHEDULER_CONTROL,
HDMI_READ(HDMI_SCHEDULER_CONTROL) |
@@ -672,10 +791,11 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS);
if (vc4_hdmi->variant->set_timings)
- vc4_hdmi->variant->set_timings(vc4_hdmi, mode);
+ vc4_hdmi->variant->set_timings(vc4_hdmi, conn_state, mode);
}
-static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
@@ -697,7 +817,8 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder)
HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
}
-static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
@@ -766,6 +887,7 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct vc4_hdmi_connector_state *vc4_state = conn_state_to_vc4_hdmi_conn_state(conn_state);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
unsigned long long pixel_rate = mode->clock * 1000;
@@ -790,9 +912,22 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
pixel_rate = mode->clock * 1000;
}
+ if (conn_state->max_bpc == 12) {
+ pixel_rate = pixel_rate * 150;
+ do_div(pixel_rate, 100);
+ } else if (conn_state->max_bpc == 10) {
+ pixel_rate = pixel_rate * 125;
+ do_div(pixel_rate, 100);
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ pixel_rate = pixel_rate * 2;
+
if (pixel_rate > vc4_hdmi->variant->max_pixel_clock)
return -EINVAL;
+ vc4_state->pixel_rate = pixel_rate;
+
return 0;
}
@@ -936,7 +1071,7 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
int ret;
vc4_hdmi->audio.streaming = false;
- ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO);
+ ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO, false);
if (ret)
dev_err(dev, "Failed to stop audio infoframe: %d\n", ret);
@@ -1267,6 +1402,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
card->dai_link = dai_link;
card->num_links = 1;
card->name = vc4_hdmi->variant->card_name;
+ card->driver_name = "vc4-hdmi";
card->dev = dev;
card->owner = THIS_MODULE;
@@ -1853,7 +1989,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
.encoder_type = VC4_ENCODER_TYPE_HDMI0,
.debugfs_name = "hdmi0_regs",
.card_name = "vc4-hdmi-0",
- .max_pixel_clock = 297000000,
+ .max_pixel_clock = HDMI_14_MAX_TMDS_CLK,
.registers = vc5_hdmi_hdmi0_fields,
.num_registers = ARRAY_SIZE(vc5_hdmi_hdmi0_fields),
.phy_lane_mapping = {
@@ -1879,7 +2015,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
.encoder_type = VC4_ENCODER_TYPE_HDMI1,
.debugfs_name = "hdmi1_regs",
.card_name = "vc4-hdmi-1",
- .max_pixel_clock = 297000000,
+ .max_pixel_clock = HDMI_14_MAX_TMDS_CLK,
.registers = vc5_hdmi_hdmi1_fields,
.num_registers = ARRAY_SIZE(vc5_hdmi_hdmi1_fields),
.phy_lane_mapping = {
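
The atomic_check hunk above scales the required TMDS clock with the connector's max_bpc, doubles it for double-clocked modes, and compares the result against the raised HDMI_14_MAX_TMDS_CLK ceiling; the chosen rate is then stored in the vc4_hdmi_connector_state for the PHY. A standalone sketch of that arithmetic (example_tmds_rate is a hypothetical helper; the driver itself uses do_div() for the 64-bit divisions):

static unsigned long long example_tmds_rate(unsigned long long mode_clock_hz,
					    unsigned int max_bpc, bool dblclk)
{
	unsigned long long rate = mode_clock_hz;

	if (max_bpc == 12)
		rate = rate * 150 / 100;	/* 1.5x for 12 bpc */
	else if (max_bpc == 10)
		rate = rate * 125 / 100;	/* 1.25x for 10 bpc */

	if (dblclk)
		rate *= 2;

	/* e.g. 1080p60 (148.5 MHz) at 12 bpc needs 222.75 MHz, still below
	 * the 340 MHz HDMI 1.4 ceiling, so it now passes atomic_check.
	 */
	return rate;
}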
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 0526a9cf608a..4c8994cfd932 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -21,10 +21,9 @@ to_vc4_hdmi_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_hdmi_encoder, base.base);
}
-struct drm_display_mode;
-
struct vc4_hdmi;
struct vc4_hdmi_register;
+struct vc4_hdmi_connector_state;
enum vc4_hdmi_phy_channel {
PHY_LANE_0 = 0,
@@ -78,11 +77,12 @@ struct vc4_hdmi_variant {
/* Callback to configure the video timings in the HDMI block */
void (*set_timings)(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *state,
struct drm_display_mode *mode);
- /* Callback to initialize the PHY according to the mode */
+ /* Callback to initialize the PHY according to the connector state */
void (*phy_init)(struct vc4_hdmi *vc4_hdmi,
- struct drm_display_mode *mode);
+ struct vc4_hdmi_connector_state *vc4_conn_state);
/* Callback to disable the PHY */
void (*phy_disable)(struct vc4_hdmi *vc4_hdmi);
@@ -180,14 +180,25 @@ encoder_to_vc4_hdmi(struct drm_encoder *encoder)
return container_of(_encoder, struct vc4_hdmi, encoder);
}
+struct vc4_hdmi_connector_state {
+ struct drm_connector_state base;
+ unsigned long long pixel_rate;
+};
+
+static inline struct vc4_hdmi_connector_state *
+conn_state_to_vc4_hdmi_conn_state(struct drm_connector_state *conn_state)
+{
+ return container_of(conn_state, struct vc4_hdmi_connector_state, base);
+}
+
void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
- struct drm_display_mode *mode);
+ struct vc4_hdmi_connector_state *vc4_conn_state);
void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi);
void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
- struct drm_display_mode *mode);
+ struct vc4_hdmi_connector_state *vc4_conn_state);
void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
index 057796b54c51..36535480f8e2 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
@@ -127,7 +127,8 @@
#define OSCILLATOR_FREQUENCY 54000000
-void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode)
+void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct vc4_hdmi_connector_state *conn_state)
{
/* PHY should be in reset, like
* vc4_hdmi_encoder_disable() does.
@@ -339,11 +340,12 @@ static void vc5_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi)
HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, BIT(10));
}
-void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode)
+void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct vc4_hdmi_connector_state *conn_state)
{
const struct phy_lane_settings *chan0_settings, *chan1_settings, *chan2_settings, *clock_settings;
const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
- unsigned long long pixel_freq = mode->clock * 1000;
+ unsigned long long pixel_freq = conn_state->pixel_rate;
unsigned long long vco_freq;
unsigned char word_sel;
u8 vco_sel, vco_div;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
index 96d764ebfe67..401863cb8c98 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
@@ -59,9 +59,12 @@ enum vc4_hdmi_field {
*/
HDMI_CTS_0,
HDMI_CTS_1,
+ HDMI_DEEP_COLOR_CONFIG_1,
HDMI_DVP_CTL,
HDMI_FIFO_CTL,
HDMI_FRAME_COUNT,
+ HDMI_GCP_CONFIG,
+ HDMI_GCP_WORD_1,
HDMI_HORZA,
HDMI_HORZB,
HDMI_HOTPLUG,
@@ -229,6 +232,9 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi0_fields[] = {
VC4_HDMI_REG(HDMI_VERTB1, 0x0f8),
VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c),
VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0),
+ VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x170),
+ VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178),
+ VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c),
VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8),
VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
@@ -305,6 +311,9 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi1_fields[] = {
VC4_HDMI_REG(HDMI_VERTB1, 0x0f8),
VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c),
VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0),
+ VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x170),
+ VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178),
+ VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c),
VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8),
VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index cccd341e5d67..2b3a597fa65f 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -326,10 +326,10 @@ void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int chan)
SCALER_DISPSTATX_EMPTY);
}
-int vc4_hvs_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane;
@@ -341,10 +341,10 @@ int vc4_hvs_atomic_check(struct drm_crtc *crtc,
/* The pixelvalve can only feed one encoder (and encoders are
* 1:1 with connectors.)
*/
- if (hweight32(state->connector_mask) > 1)
+ if (hweight32(crtc_state->connector_mask) > 1)
return -EINVAL;
- drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state)
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state)
dlist_count += vc4_plane_dlist_size(plane_state);
dlist_count++; /* Account for SCALER_CTL0_END. */
@@ -391,11 +391,12 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
}
void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(new_crtc_state);
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
bool oneshot = vc4_state->feed_txp;
@@ -404,9 +405,10 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
}
void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(old_state);
unsigned int chan = vc4_state->assigned_channel;
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index ba310c0ab5f6..f09254c2497d 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -39,7 +39,11 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
struct vc4_hvs_state {
struct drm_private_state base;
- unsigned int unassigned_channels;
+
+ struct {
+ unsigned in_use: 1;
+ struct drm_crtc_commit *pending_commit;
+ } fifo_state[HVS_NUM_CHANNELS];
};
static struct vc4_hvs_state *
@@ -183,6 +187,32 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
}
static struct vc4_hvs_state *
+vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_vc4_hvs_state(priv_state);
+}
+
+static struct vc4_hvs_state *
+vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_vc4_hvs_state(priv_state);
+}
+
+static struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->dev);
@@ -302,14 +332,15 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
}
}
-static void
-vc4_atomic_complete_commit(struct drm_atomic_state *state)
+static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
+ struct vc4_hvs_state *old_hvs_state;
int i;
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -325,9 +356,35 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
if (vc4->hvs->hvs5)
clk_set_min_rate(hvs->core_clk, 500000000);
- drm_atomic_helper_wait_for_fences(dev, state, false);
+ old_hvs_state = vc4_hvs_get_old_global_state(state);
+ if (!old_hvs_state)
+ return;
+
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
+ struct vc4_crtc_state *vc4_crtc_state =
+ to_vc4_crtc_state(old_crtc_state);
+ struct drm_crtc_commit *commit;
+ unsigned int channel = vc4_crtc_state->assigned_channel;
+ unsigned long done;
+
+ if (channel == VC4_HVS_CHANNEL_DISABLED)
+ continue;
+
+ if (!old_hvs_state->fifo_state[channel].in_use)
+ continue;
+
+ commit = old_hvs_state->fifo_state[i].pending_commit;
+ if (!commit)
+ continue;
- drm_atomic_helper_wait_for_dependencies(state);
+ done = wait_for_completion_timeout(&commit->hw_done, 10 * HZ);
+ if (!done)
+ drm_err(dev, "Timed out waiting for hw_done\n");
+
+ done = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
+ if (!done)
+ drm_err(dev, "Timed out waiting for flip_done\n");
+ }
drm_atomic_helper_commit_modeset_disables(dev, state);
@@ -350,125 +407,37 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_helper_commit_cleanup_done(state);
-
if (vc4->hvs->hvs5)
clk_set_min_rate(hvs->core_clk, 0);
-
- drm_atomic_state_put(state);
-
- up(&vc4->async_modeset);
-}
-
-static void commit_work(struct work_struct *work)
-{
- struct drm_atomic_state *state = container_of(work,
- struct drm_atomic_state,
- commit_work);
- vc4_atomic_complete_commit(state);
}
-/**
- * vc4_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement asynchronous commits.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-static int vc4_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
+static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- int ret;
-
- if (state->async_update) {
- ret = down_interruptible(&vc4->async_modeset);
- if (ret)
- return ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret) {
- up(&vc4->async_modeset);
- return ret;
- }
-
- drm_atomic_helper_async_commit(dev, state);
-
- drm_atomic_helper_cleanup_planes(dev, state);
-
- up(&vc4->async_modeset);
-
- return 0;
- }
+ struct drm_crtc_state *crtc_state;
+ struct vc4_hvs_state *hvs_state;
+ struct drm_crtc *crtc;
+ unsigned int i;
- /* We know for sure we don't want an async update here. Set
- * state->legacy_cursor_update to false to prevent
- * drm_atomic_helper_setup_commit() from auto-completing
- * commit->flip_done.
- */
- state->legacy_cursor_update = false;
- ret = drm_atomic_helper_setup_commit(state, nonblock);
- if (ret)
- return ret;
+ hvs_state = vc4_hvs_get_new_global_state(state);
+ if (!hvs_state)
+ return -EINVAL;
- INIT_WORK(&state->commit_work, commit_work);
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct vc4_crtc_state *vc4_crtc_state =
+ to_vc4_crtc_state(crtc_state);
+ unsigned int channel =
+ vc4_crtc_state->assigned_channel;
- ret = down_interruptible(&vc4->async_modeset);
- if (ret)
- return ret;
+ if (channel == VC4_HVS_CHANNEL_DISABLED)
+ continue;
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret) {
- up(&vc4->async_modeset);
- return ret;
- }
+ if (!hvs_state->fifo_state[channel].in_use)
+ continue;
- if (!nonblock) {
- ret = drm_atomic_helper_wait_for_fences(dev, state, true);
- if (ret) {
- drm_atomic_helper_cleanup_planes(dev, state);
- up(&vc4->async_modeset);
- return ret;
- }
+ hvs_state->fifo_state[channel].pending_commit =
+ drm_crtc_commit_get(crtc_state->commit);
}
- /*
- * This is the point of no return - everything below never fails except
- * when the hw goes bonghits. Which means we can commit the new state on
- * the software side now.
- */
-
- BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
-
- /*
- * Everything below can be run asynchronously without the need to grab
- * any modeset locks at all under one condition: It must be guaranteed
- * that the asynchronous work has either been cancelled (if the driver
- * supports it, which at least requires that the framebuffers get
- * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
- * before the new state gets committed on the software side with
- * drm_atomic_helper_swap_state().
- *
- * This scheme allows new atomic state updates to be prepared and
- * checked in parallel to the asynchronous completion of the previous
- * update. Which is important since compositors need to figure out the
- * composition of the next frame right after having submitted the
- * current layout.
- */
-
- drm_atomic_state_get(state);
- if (nonblock)
- queue_work(system_unbound_wq, &state->commit_work);
- else
- vc4_atomic_complete_commit(state);
-
return 0;
}
@@ -697,6 +666,7 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
struct vc4_hvs_state *state;
+ unsigned int i;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
@@ -704,7 +674,16 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
- state->unassigned_channels = old_state->unassigned_channels;
+
+ for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+ state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
+
+ if (!old_state->fifo_state[i].pending_commit)
+ continue;
+
+ state->fifo_state[i].pending_commit =
+ drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
+ }
return &state->base;
}
@@ -713,6 +692,14 @@ static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
+ unsigned int i;
+
+ for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+ if (!hvs_state->fifo_state[i].pending_commit)
+ continue;
+
+ drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
+ }
kfree(hvs_state);
}
@@ -737,7 +724,6 @@ static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
if (!state)
return -ENOMEM;
- state->unassigned_channels = GENMASK(HVS_NUM_CHANNELS - 1, 0);
drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
&state->base,
&vc4_hvs_state_funcs);
@@ -781,12 +767,17 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
struct vc4_hvs_state *hvs_new_state;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
+ unsigned int unassigned_channels = 0;
unsigned int i;
hvs_new_state = vc4_hvs_get_global_state(state);
if (!hvs_new_state)
return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
+ if (!hvs_new_state->fifo_state[i].in_use)
+ unassigned_channels |= BIT(i);
+
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct vc4_crtc_state *old_vc4_crtc_state =
to_vc4_crtc_state(old_crtc_state);
@@ -794,6 +785,7 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
to_vc4_crtc_state(new_crtc_state);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
unsigned int matching_channels;
+ unsigned int channel;
/* Nothing to do here, let's skip it */
if (old_crtc_state->enable == new_crtc_state->enable)
@@ -804,7 +796,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
/* If we're disabling our CRTC, we put back our channel */
if (!new_crtc_state->enable) {
- hvs_new_state->unassigned_channels |= BIT(old_vc4_crtc_state->assigned_channel);
+ channel = old_vc4_crtc_state->assigned_channel;
+ hvs_new_state->fifo_state[channel].in_use = false;
new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
continue;
}
@@ -833,15 +826,14 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
* the future, we will need to have something smarter,
* but it works so far.
*/
- matching_channels = hvs_new_state->unassigned_channels & vc4_crtc->data->hvs_available_channels;
- if (matching_channels) {
- unsigned int channel = ffs(matching_channels) - 1;
-
- new_vc4_crtc_state->assigned_channel = channel;
- hvs_new_state->unassigned_channels &= ~BIT(channel);
- } else {
+ matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
+ if (!matching_channels)
return -EINVAL;
- }
+
+ channel = ffs(matching_channels) - 1;
+ new_vc4_crtc_state->assigned_channel = channel;
+ unassigned_channels &= ~BIT(channel);
+ hvs_new_state->fifo_state[channel].in_use = true;
}
return 0;
@@ -867,9 +859,14 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
return vc4_load_tracker_atomic_check(state);
}
+static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
+ .atomic_commit_setup = vc4_atomic_commit_setup,
+ .atomic_commit_tail = vc4_atomic_commit_tail,
+};
+
static const struct drm_mode_config_funcs vc4_mode_funcs = {
.atomic_check = vc4_atomic_check,
- .atomic_commit = vc4_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
.fb_create = vc4_fb_create,
};
@@ -889,8 +886,6 @@ int vc4_kms_load(struct drm_device *dev)
vc4->load_tracker_enabled = true;
}
- sema_init(&vc4->async_modeset, 1);
-
/* Set support for vblank irq fast disable, before drm_vblank_init() */
dev->vblank_disable_immediate = true;
@@ -910,6 +905,7 @@ int vc4_kms_load(struct drm_device *dev)
}
dev->mode_config.funcs = &vc4_mode_funcs;
+ dev->mode_config.helper_private = &vc4_mode_config_helpers;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
dev->mode_config.allow_fb_modifiers = true;
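
With the switch to drm_atomic_helper_commit(), the driver-global async_modeset semaphore is gone: vc4_atomic_commit_setup() (run from the helpers' setup_commit phase) takes a drm_crtc_commit reference for every HVS FIFO a commit uses, and vc4_atomic_commit_tail() of the next commit waits on that previous user before touching the hardware. A minimal sketch of the waiting side, assuming a stored drm_crtc_commit reference (example_wait_for_previous_fifo_user is a hypothetical name):

static void example_wait_for_previous_fifo_user(struct drm_device *dev,
						struct drm_crtc_commit *commit)
{
	if (!commit)
		return;

	/* Serialise on the previous commit that drove this FIFO. */
	if (!wait_for_completion_timeout(&commit->hw_done, 10 * HZ))
		drm_err(dev, "Timed out waiting for hw_done\n");

	if (!wait_for_completion_timeout(&commit->flip_done, 10 * HZ))
		drm_err(dev, "Timed out waiting for flip_done\n");
}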
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 6b39cc2ca18d..6bd8260aa9f2 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -1268,11 +1268,6 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
.atomic_async_update = vc4_plane_atomic_async_update,
};
-static void vc4_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
-}
-
static bool vc4_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
@@ -1323,7 +1318,7 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
static const struct drm_plane_funcs vc4_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vc4_plane_destroy,
+ .destroy = drm_plane_cleanup,
.set_property = NULL,
.reset = vc4_plane_reset,
.atomic_duplicate_state = vc4_plane_duplicate_state,
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 8aa5220885f4..c0122d83b651 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -382,7 +382,6 @@ static const struct drm_crtc_funcs vc4_txp_crtc_funcs = {
.reset = vc4_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.enable_vblank = vc4_txp_enable_vblank,
.disable_vblank = vc4_txp_disable_vblank,
};
@@ -395,7 +394,7 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc,
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
int ret;
- ret = vc4_hvs_atomic_check(crtc, crtc_state);
+ ret = vc4_hvs_atomic_check(crtc, state);
if (ret)
return ret;
@@ -408,23 +407,19 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc,
static void vc4_txp_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
- crtc);
drm_crtc_vblank_on(crtc);
- vc4_hvs_atomic_enable(crtc, old_state);
+ vc4_hvs_atomic_enable(crtc, state);
}
static void vc4_txp_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
- crtc);
struct drm_device *dev = crtc->dev;
/* Disable vblank irq handling before crtc is disabled. */
drm_crtc_vblank_off(crtc);
- vc4_hvs_atomic_disable(crtc, old_state);
+ vc4_hvs_atomic_disable(crtc, state);
/*
* Make sure we issue a vblank event after disabling the CRTC if
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index f8635ccaf9a1..a0e75f1d5d01 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -356,8 +356,7 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
}
obj->pages_pin_count++; /* perma-pinned */
- drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
- npages);
+ drm_prime_sg_to_page_array(obj->table, obj->pages, npages);
return &obj->base;
}
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index a3e0fb5b8671..faeae5d881fb 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -308,7 +308,7 @@ int via_driver_irq_postinstall(struct drm_device *dev)
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
- DRM_DEBUG("via_driver_irq_postinstall\n");
+ DRM_DEBUG("fun: %s\n", __func__);
if (!dev_priv)
return -EINVAL;
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index b925b8b1da16..51ec7c3240c9 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
- depends on DRM && VIRTIO && VIRTIO_MENU && MMU
+ depends on DRM && VIRTIO_MENU && MMU
+ select VIRTIO
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select VIRTIO_DMA_SHARED_BUFFER
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 5fefc88d47e4..c2b20e0ee030 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -28,14 +28,13 @@
#include "virtgpu_drv.h"
-static void virtio_add_bool(struct seq_file *m, const char *name,
- bool value)
+static void virtio_gpu_add_bool(struct seq_file *m, const char *name,
+ bool value)
{
seq_printf(m, "%-16s : %s\n", name, value ? "yes" : "no");
}
-static void virtio_add_int(struct seq_file *m, const char *name,
- int value)
+static void virtio_gpu_add_int(struct seq_file *m, const char *name, int value)
{
seq_printf(m, "%-16s : %d\n", name, value);
}
@@ -45,13 +44,16 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
- virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
- virtio_add_bool(m, "edid", vgdev->has_edid);
- virtio_add_bool(m, "indirect", vgdev->has_indirect);
- virtio_add_bool(m, "resource uuid", vgdev->has_resource_assign_uuid);
- virtio_add_bool(m, "blob resources", vgdev->has_resource_blob);
- virtio_add_int(m, "cap sets", vgdev->num_capsets);
- virtio_add_int(m, "scanouts", vgdev->num_scanouts);
+ virtio_gpu_add_bool(m, "virgl", vgdev->has_virgl_3d);
+ virtio_gpu_add_bool(m, "edid", vgdev->has_edid);
+ virtio_gpu_add_bool(m, "indirect", vgdev->has_indirect);
+
+ virtio_gpu_add_bool(m, "resource uuid",
+ vgdev->has_resource_assign_uuid);
+
+ virtio_gpu_add_bool(m, "blob resources", vgdev->has_resource_blob);
+ virtio_gpu_add_int(m, "cap sets", vgdev->num_capsets);
+ virtio_gpu_add_int(m, "scanouts", vgdev->num_scanouts);
if (vgdev->host_visible_region.len) {
seq_printf(m, "%-16s : 0x%lx +0x%lx\n", "host visible region",
(unsigned long)vgdev->host_visible_region.addr,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 27f13bd29c13..a21dc3ad6f88 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -54,7 +54,6 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd
DRM_INFO("pci: %s detected at %s\n",
vga ? "virtio-vga" : "virtio-gpu-pci",
pname);
- dev->pdev = pdev;
if (vga)
drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
"virtiodrmfb");
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 6a232553c99b..d9dbc4f258f3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -136,6 +136,7 @@ struct virtio_gpu_fence_driver {
struct virtio_gpu_fence {
struct dma_fence f;
+ uint64_t fence_id;
struct virtio_gpu_fence_driver *drv;
struct list_head node;
};
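
The new fence_id field keeps the host-visible fence identifier separate from the dma-fence seqno, and the reworked signalling path in the virtgpu_fence.c hunk below only completes fences that are strictly earlier on the same timeline. The ordering test it relies on is dma_fence_is_later(), which compares seqnos of two fences from the same context; a small illustration (example_should_signal is a hypothetical helper):

static bool example_should_signal(struct dma_fence *just_signaled,
				  struct dma_fence *candidate)
{
	/* Fences from different timelines are never compared. */
	if (just_signaled->context != candidate->context)
		return false;

	/* True when 'candidate' carries an earlier seqno than the fence
	 * that just signalled, so it can be completed as well.
	 */
	return dma_fence_is_later(just_signaled, candidate);
}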
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 728ca36f6327..d28e25e8409b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -27,51 +27,48 @@
#include "virtgpu_drv.h"
-#define to_virtio_fence(x) \
+#define to_virtio_gpu_fence(x) \
container_of(x, struct virtio_gpu_fence, f)
-static const char *virtio_get_driver_name(struct dma_fence *f)
+static const char *virtio_gpu_get_driver_name(struct dma_fence *f)
{
return "virtio_gpu";
}
-static const char *virtio_get_timeline_name(struct dma_fence *f)
+static const char *virtio_gpu_get_timeline_name(struct dma_fence *f)
{
return "controlq";
}
-static bool virtio_fence_signaled(struct dma_fence *f)
+static bool virtio_gpu_fence_signaled(struct dma_fence *f)
{
- struct virtio_gpu_fence *fence = to_virtio_fence(f);
-
- if (WARN_ON_ONCE(fence->f.seqno == 0))
- /* leaked fence outside driver before completing
- * initialization with virtio_gpu_fence_emit */
- return false;
- if (atomic64_read(&fence->drv->last_fence_id) >= fence->f.seqno)
- return true;
+	/* Fence leaked outside the driver before completing
+	 * initialization with virtio_gpu_fence_emit.
+ */
+ WARN_ON_ONCE(f->seqno == 0);
return false;
}
-static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
+static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
{
- snprintf(str, size, "%llu", f->seqno);
+ snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
}
-static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
+static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
+ int size)
{
- struct virtio_gpu_fence *fence = to_virtio_fence(f);
+ struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
snprintf(str, size, "%llu",
(u64)atomic64_read(&fence->drv->last_fence_id));
}
-static const struct dma_fence_ops virtio_fence_ops = {
- .get_driver_name = virtio_get_driver_name,
- .get_timeline_name = virtio_get_timeline_name,
- .signaled = virtio_fence_signaled,
- .fence_value_str = virtio_fence_value_str,
- .timeline_value_str = virtio_timeline_value_str,
+static const struct dma_fence_ops virtio_gpu_fence_ops = {
+ .get_driver_name = virtio_gpu_get_driver_name,
+ .get_timeline_name = virtio_gpu_get_timeline_name,
+ .signaled = virtio_gpu_fence_signaled,
+ .fence_value_str = virtio_gpu_fence_value_str,
+ .timeline_value_str = virtio_gpu_timeline_value_str,
};
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
@@ -88,7 +85,8 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
* unknown yet. The fence must not be used outside of the driver
* until virtio_gpu_fence_emit is called.
*/
- dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);
+ dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock, drv->context,
+ 0);
return fence;
}
@@ -101,7 +99,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
unsigned long irq_flags;
spin_lock_irqsave(&drv->lock, irq_flags);
- fence->f.seqno = ++drv->current_fence_id;
+ fence->fence_id = fence->f.seqno = ++drv->current_fence_id;
dma_fence_get(&fence->f);
list_add_tail(&fence->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
@@ -109,24 +107,45 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
trace_dma_fence_emit(&fence->f);
cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
- cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
+ cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);
}
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
u64 fence_id)
{
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
- struct virtio_gpu_fence *fence, *tmp;
+ struct virtio_gpu_fence *signaled, *curr, *tmp;
unsigned long irq_flags;
spin_lock_irqsave(&drv->lock, irq_flags);
atomic64_set(&vgdev->fence_drv.last_fence_id, fence_id);
- list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
- if (fence_id < fence->f.seqno)
+ list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
+ if (fence_id != curr->fence_id)
continue;
- dma_fence_signal_locked(&fence->f);
- list_del(&fence->node);
- dma_fence_put(&fence->f);
+
+ signaled = curr;
+
+ /*
+ * Signal any fences with a strictly smaller sequence number
+	 * than the currently signaled fence.
+ */
+ list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
+ /* dma-fence contexts must match */
+ if (signaled->f.context != curr->f.context)
+ continue;
+
+ if (!dma_fence_is_later(&signaled->f, &curr->f))
+ continue;
+
+ dma_fence_signal_locked(&curr->f);
+ list_del(&curr->node);
+ dma_fence_put(&curr->f);
+ }
+
+ dma_fence_signal_locked(&signaled->f);
+ list_del(&signaled->node);
+ dma_fence_put(&signaled->f);
+ break;
}
spin_unlock_irqrestore(&drv->lock, irq_flags);
}
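
The reworked virtio_gpu_fence_event_process() above first looks for an exact fence_id match and then retires every earlier fence on the same dma-fence timeline. A minimal sketch of the ordering test that inner loop relies on, using only generic dma-fence helpers; the helper name can_retire() is illustrative and not part of the patch:

#include <linux/dma-fence.h>

/*
 * A pending fence may be retired once a later fence from the same
 * timeline has signaled: the dma-fence contexts must match and the
 * signaled fence must compare as later via dma_fence_is_later().
 */
static bool can_retire(struct dma_fence *signaled, struct dma_fence *candidate)
{
	if (signaled->context != candidate->context)
		return false;	/* unrelated timelines carry no ordering */

	return dma_fence_is_later(signaled, candidate);
}
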
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index c30c75ee83fc..8502400b2f9c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -39,9 +39,6 @@ static int virtio_gpu_gem_create(struct drm_file *file,
int ret;
u32 handle;
- if (vgdev->has_virgl_3d)
- virtio_gpu_create_context(dev, file);
-
ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
if (ret < 0)
return ret;
@@ -119,6 +116,11 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
if (!vgdev->has_virgl_3d)
goto out_notify;
+ /* the context might still be missing when the first ioctl is
+ * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
+ */
+ virtio_gpu_create_context(obj->dev, file);
+
objs = virtio_gpu_array_alloc(1);
if (!objs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 23c21bc4d01e..5cc34e7330fa 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -69,6 +69,7 @@ static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
.close = virtio_gpu_gem_object_close,
.free = virtio_gpu_vram_free,
.mmap = virtio_gpu_vram_mmap,
+ .export = virtgpu_gem_prime_export,
};
bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
@@ -134,6 +135,8 @@ int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
obj = &vram->base.base.base;
obj->funcs = &virtio_gpu_vram_funcs;
+
+ params->size = PAGE_ALIGN(params->size);
drm_gem_private_object_init(vgdev->ddev, obj, params->size);
/* Create fake offset */
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index d4d39227f2ed..2173b82606f6 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -34,12 +34,16 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-static struct vkms_device *vkms_device;
+static struct vkms_config *default_config;
-bool enable_cursor = true;
+static bool enable_cursor = true;
module_param_named(enable_cursor, enable_cursor, bool, 0444);
MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");
+static bool enable_writeback = true;
+module_param_named(enable_writeback, enable_writeback, bool, 0444);
+MODULE_PARM_DESC(enable_writeback, "Enable/Disable writeback connector support");
+
DEFINE_DRM_GEM_FOPS(vkms_driver_fops);
static void vkms_release(struct drm_device *dev)
@@ -113,16 +117,20 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
dev->mode_config.max_height = YRES_MAX;
dev->mode_config.cursor_width = 512;
dev->mode_config.cursor_height = 512;
- dev->mode_config.preferred_depth = 32;
+	/* FIXME: There's confusion between bpp and depth between this and the
+	 * fbdev helpers. We have to go with 0, meaning "pick the default",
+	 * which is XRGB8888 in all cases. */
+ dev->mode_config.preferred_depth = 0;
dev->mode_config.helper_private = &vkms_mode_config_helpers;
return vkms_output_init(vkmsdev, 0);
}
-static int __init vkms_init(void)
+static int vkms_create(struct vkms_config *config)
{
int ret;
struct platform_device *pdev;
+ struct vkms_device *vkms_device;
pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
if (IS_ERR(pdev))
@@ -140,6 +148,8 @@ static int __init vkms_init(void)
goto out_devres;
}
vkms_device->platform = pdev;
+ vkms_device->config = config;
+ config->dev = vkms_device;
ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
DMA_BIT_MASK(64));
@@ -176,21 +186,47 @@ out_unregister:
return ret;
}
-static void __exit vkms_exit(void)
+static int __init vkms_init(void)
+{
+ struct vkms_config *config;
+
+ config = kmalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ default_config = config;
+
+ config->cursor = enable_cursor;
+ config->writeback = enable_writeback;
+
+ return vkms_create(config);
+}
+
+static void vkms_destroy(struct vkms_config *config)
{
struct platform_device *pdev;
- if (!vkms_device) {
+ if (!config->dev) {
DRM_INFO("vkms_device is NULL.\n");
return;
}
- pdev = vkms_device->platform;
+ pdev = config->dev->platform;
- drm_dev_unregister(&vkms_device->drm);
- drm_atomic_helper_shutdown(&vkms_device->drm);
+ drm_dev_unregister(&config->dev->drm);
+ drm_atomic_helper_shutdown(&config->dev->drm);
devres_release_group(&pdev->dev, NULL);
platform_device_unregister(pdev);
+
+ config->dev = NULL;
+}
+
+static void __exit vkms_exit(void)
+{
+ if (default_config->dev)
+ vkms_destroy(default_config);
+
+ kfree(default_config);
}
module_init(vkms_init);
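
With the lifetime split into vkms_create()/vkms_destroy() driven by a struct vkms_config, module load is now just one instantiation of a default config. A hedged sketch of how another instance could be brought up from within vkms_drv.c, for example with writeback disabled; vkms_create_headless() is a hypothetical helper, not something this patch adds:

/* Build a config with writeback disabled and hand it to vkms_create().
 * On failure the config is freed here; on success it stays owned by the
 * created device until vkms_destroy() runs.
 */
static int vkms_create_headless(void)
{
	struct vkms_config *config;
	int ret;

	config = kzalloc(sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	config->cursor = true;
	config->writeback = false;

	ret = vkms_create(config);
	if (ret)
		kfree(config);

	return ret;
}
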
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 5ed91ff08cb3..35540c7c4416 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -19,8 +19,6 @@
#define XRES_MAX 8192
#define YRES_MAX 8192
-extern bool enable_cursor;
-
struct vkms_composer {
struct drm_framebuffer fb;
struct drm_rect src, dst;
@@ -82,10 +80,20 @@ struct vkms_output {
spinlock_t composer_lock;
};
+struct vkms_device;
+
+struct vkms_config {
+ bool writeback;
+ bool cursor;
+ /* only set when instantiated */
+ struct vkms_device *dev;
+};
+
struct vkms_device {
struct drm_device drm;
struct platform_device *platform;
struct vkms_output output;
+ const struct vkms_config *config;
};
#define drm_crtc_to_vkms_output(target) \
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 4a1848b0318f..f5f6f15c362c 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -41,12 +41,13 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
struct drm_crtc *crtc = &output->crtc;
struct drm_plane *primary, *cursor = NULL;
int ret;
+ int writeback;
primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
if (IS_ERR(primary))
return PTR_ERR(primary);
- if (enable_cursor) {
+ if (vkmsdev->config->cursor) {
cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index);
if (IS_ERR(cursor)) {
ret = PTR_ERR(cursor);
@@ -80,9 +81,11 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
goto err_attach;
}
- ret = vkms_enable_writeback_connector(vkmsdev);
- if (ret)
- DRM_ERROR("Failed to init writeback connector\n");
+ if (vkmsdev->config->writeback) {
+ writeback = vkms_enable_writeback_connector(vkmsdev);
+ if (writeback)
+ DRM_ERROR("Failed to init writeback connector\n");
+ }
drm_mode_config_reset(dev);
@@ -98,7 +101,7 @@ err_connector:
drm_crtc_cleanup(crtc);
err_crtc:
- if (enable_cursor)
+ if (vkmsdev->config->cursor)
drm_plane_cleanup(cursor);
err_cursor:
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 31f85f09f1fc..cc4cdca7176e 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
- vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
- vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
- vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
+ vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
+ vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
+ vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 16077785ad47..0fe869d0fad1 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -59,7 +59,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index f41550797970..180f6dbc9460 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -555,7 +555,7 @@ static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdSetShader body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -564,7 +564,7 @@ static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
cmd->body.cid = bi->ctx->id;
cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -587,7 +587,7 @@ static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
SVGA3dCmdSetRenderTarget body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -598,7 +598,7 @@ static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
cmd->body.target.face = 0;
cmd->body.target.mipmap = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -626,7 +626,7 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
} body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -636,7 +636,7 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
cmd->body.s1.stage = binding->texture_stage;
cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -657,7 +657,7 @@ static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetShader body;
} *cmd;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -665,7 +665,7 @@ static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
cmd->header.size = sizeof(cmd->body);
cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -686,7 +686,7 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetSingleConstantBuffer body;
} *cmd;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -703,7 +703,7 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
cmd->body.sizeInBytes = 0;
cmd->body.sid = SVGA3D_INVALID_ID;
}
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -810,7 +810,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -821,7 +821,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
cbs->bind_first_slot, cbs->bind_cmd_count);
@@ -846,7 +846,7 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -860,7 +860,7 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
return 0;
@@ -930,7 +930,7 @@ static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
cmd_size = sizeof(*cmd) + so_target_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -938,7 +938,7 @@ static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
cmd->header.size = sizeof(cmd->body) + so_target_size;
memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
return 0;
@@ -1044,7 +1044,7 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
cmd_size = sizeof(*cmd) + set_vb_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -1054,7 +1054,7 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
bitmap_clear(cbs->dirty_vb,
cbs->bind_first_slot, cbs->bind_cmd_count);
@@ -1074,7 +1074,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (!cmd)
return -ENOMEM;
@@ -1086,7 +1086,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
return 0;
}
@@ -1104,7 +1104,7 @@ static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (!cmd)
return -ENOMEM;
@@ -1116,7 +1116,7 @@ static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
return 0;
}
@@ -1263,7 +1263,7 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetIndexBuffer body;
} *cmd;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -1279,7 +1279,7 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
cmd->body.offset = 0;
}
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -1315,14 +1315,14 @@ static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetStreamOutput body;
} *cmd;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
if (!cmd)
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT;
cmd->header.size = sizeof(cmd->body);
cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index f21881e087db..9f2779ddcf08 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -482,8 +482,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
d.src_addr = NULL;
d.dst_pages = dst->ttm->pages;
d.src_pages = src->ttm->pages;
- d.dst_num_pages = dst->num_pages;
- d.src_num_pages = src->num_pages;
+ d.dst_num_pages = dst->mem.num_pages;
+ d.src_num_pages = src->mem.num_pages;
d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
d.diff = diff;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 263d76ae43f0..63dbc44eebe0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -223,7 +223,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
uint32_t new_flags;
place = vmw_vram_placement.placement[0];
- place.lpfn = bo->num_pages;
+ place.lpfn = bo->mem.num_pages;
placement.num_placement = 1;
placement.placement = &place;
placement.num_busy_placement = 1;
@@ -244,7 +244,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
* that situation.
*/
if (bo->mem.mem_type == TTM_PL_VRAM &&
- bo->mem.start < bo->num_pages &&
+ bo->mem.start < bo->mem.num_pages &&
bo->mem.start > 0 &&
buf->base.pin_count == 0) {
ctx.interruptible = false;
@@ -391,7 +391,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
if (virtual)
return virtual;
- ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+ ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
if (ret)
DRM_ERROR("Buffer object map failed: %d.\n", ret);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index a95156fc5db7..7400d617ae3c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2020 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -36,9 +36,8 @@ struct vmw_temp_set_context {
SVGA3dCmdDXTempSetContext body;
};
-bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
+bool vmw_supports_3d(struct vmw_private *dev_priv)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
@@ -62,15 +61,15 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
- fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+ fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
- hwversion = vmw_mmio_read(fifo_mem +
- ((fifo->capabilities &
- SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
- SVGA_FIFO_3D_HWVERSION_REVISED :
- SVGA_FIFO_3D_HWVERSION));
+ hwversion = vmw_fifo_mem_read(dev_priv,
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
if (hwversion == 0)
return false;
@@ -87,13 +86,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t caps;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
- caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
+ caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
if (caps & SVGA_FIFO_CAP_PITCHLOCK)
return true;
@@ -102,7 +100,6 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
@@ -129,6 +126,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
SVGA_REG_ENABLE_HIDE);
+
vmw_write(dev_priv, SVGA_REG_TRACES, 0);
min = 4;
@@ -139,19 +137,19 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
if (min < PAGE_SIZE)
min = PAGE_SIZE;
- vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
- vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size);
wmb();
- vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
- vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
- vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0);
mb();
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
- max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
- min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
- fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
+ max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+ min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
+ fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
(unsigned int) max,
@@ -159,15 +157,14 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
(unsigned int) fifo->capabilities);
atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
- vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
- vmw_marker_queue_init(&fifo->marker_queue);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, dev_priv->last_read_seqno);
return 0;
}
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
+ u32 *fifo_mem = dev_priv->fifo_mem;
if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
@@ -175,13 +172,11 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
-
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
;
- dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+ dev_priv->last_read_seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
dev_priv->config_done_state);
@@ -190,8 +185,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
vmw_write(dev_priv, SVGA_REG_TRACES,
dev_priv->traces_state);
- vmw_marker_queue_takedown(&fifo->marker_queue);
-
if (likely(fifo->static_buffer != NULL)) {
vfree(fifo->static_buffer);
fifo->static_buffer = NULL;
@@ -205,11 +198,10 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
- uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
- uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
- uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
- uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
+ uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+ uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
+ uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
+ uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
return ((max - next_cmd) + (stop - min) <= bytes);
}
@@ -298,7 +290,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
- u32 *fifo_mem = dev_priv->mmio_virt;
+ u32 *fifo_mem = dev_priv->fifo_mem;
uint32_t max;
uint32_t min;
uint32_t next_cmd;
@@ -306,9 +298,9 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
int ret;
mutex_lock(&fifo_state->fifo_mutex);
- max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
- min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
- next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
+ max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+ min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
+ next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
if (unlikely(bytes >= (max - min)))
goto out_err;
@@ -319,7 +311,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
fifo_state->reserved_size = bytes;
while (1) {
- uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
+ uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
bool need_bounce = false;
bool reserve_in_place = false;
@@ -353,8 +345,9 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
fifo_state->using_bounce_buffer = false;
if (reserveable)
- vmw_mmio_write(bytes, fifo_mem +
- SVGA_FIFO_RESERVED);
+ vmw_fifo_mem_write(dev_priv,
+ SVGA_FIFO_RESERVED,
+ bytes);
return (void __force *) (fifo_mem +
(next_cmd >> 2));
} else {
@@ -381,7 +374,7 @@ out_err:
return NULL;
}
-void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
+void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes,
int ctx_id)
{
void *ret;
@@ -402,10 +395,11 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
}
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
- u32 *fifo_mem,
+ struct vmw_private *vmw,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
+ u32 *fifo_mem = vmw->fifo_mem;
uint32_t chunk_size = max - next_cmd;
uint32_t rest;
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
@@ -414,7 +408,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
if (bytes < chunk_size)
chunk_size = bytes;
- vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
+ vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes);
mb();
memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
rest = bytes - chunk_size;
@@ -423,7 +417,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
}
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
- u32 *fifo_mem,
+ struct vmw_private *vmw,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
@@ -431,12 +425,12 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
fifo_state->dynamic_buffer : fifo_state->static_buffer;
while (bytes > 0) {
- vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
+ vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++);
next_cmd += sizeof(uint32_t);
if (unlikely(next_cmd == max))
next_cmd = min;
mb();
- vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd);
mb();
bytes -= sizeof(uint32_t);
}
@@ -445,10 +439,9 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
- u32 *fifo_mem = dev_priv->mmio_virt;
- uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
- uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
- uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+ uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
+ uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+ uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
if (fifo_state->dx)
@@ -462,10 +455,10 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
if (fifo_state->using_bounce_buffer) {
if (reserveable)
- vmw_fifo_res_copy(fifo_state, fifo_mem,
+ vmw_fifo_res_copy(fifo_state, dev_priv,
next_cmd, max, min, bytes);
else
- vmw_fifo_slow_copy(fifo_state, fifo_mem,
+ vmw_fifo_slow_copy(fifo_state, dev_priv,
next_cmd, max, min, bytes);
if (fifo_state->dynamic_buffer) {
@@ -481,18 +474,18 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
if (next_cmd >= max)
next_cmd -= max - min;
mb();
- vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd);
}
if (reserveable)
- vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0);
mb();
up_write(&fifo_state->rwsem);
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
mutex_unlock(&fifo_state->fifo_mutex);
}
-void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
@@ -507,7 +500,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
* @dev_priv: Pointer to device private structure.
* @bytes: Number of bytes to commit.
*/
-void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
+void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
@@ -522,7 +515,7 @@ void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
* @dev_priv: Pointer to device private structure.
* @interruptible: Whether to wait interruptible if function needs to sleep.
*/
-int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
+int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
{
might_sleep();
@@ -532,7 +525,7 @@ int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
return 0;
}
-int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
@@ -540,7 +533,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
int ret = 0;
uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
- fm = VMW_FIFO_RESERVE(dev_priv, bytes);
+ fm = VMW_CMD_RESERVE(dev_priv, bytes);
if (unlikely(fm == NULL)) {
*seqno = atomic_read(&dev_priv->marker_seq);
ret = -ENOMEM;
@@ -560,15 +553,14 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
* waiting code in vmwgfx_irq.c will emulate this.
*/
- vmw_fifo_commit(dev_priv, 0);
+ vmw_cmd_commit(dev_priv, 0);
return 0;
}
*fm++ = SVGA_CMD_FENCE;
cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno;
- vmw_fifo_commit_flush(dev_priv, bytes);
- (void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
+ vmw_cmd_commit_flush(dev_priv, bytes);
vmw_update_seqno(dev_priv, fifo_state);
out_err:
@@ -599,7 +591,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForQuery body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -616,7 +608,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
cmd->body.guestResult.offset = 0;
}
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -645,7 +637,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForGBQuery body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -657,7 +649,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
cmd->body.mobid = bo->mem.start;
cmd->body.offset = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -681,7 +673,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
*
* Returns -ENOMEM on failure to reserve fifo space.
*/
-int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
uint32_t cid)
{
if (dev_priv->has_mob)
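
Throughout the renamed vmwgfx_cmd.c, the old vmw_mmio_read()/vmw_mmio_write() pointer arithmetic is replaced by vmw_fifo_mem_read()/vmw_fifo_mem_write() accessors that take the FIFO register index directly. Their real definitions live in vmwgfx_drv.h and are not part of this hunk; one plausible shape, assuming fifo_mem is the u32 pointer to the area remapped in vmw_setup_pci_resources():

/* Illustrative only; the actual accessors are defined in vmwgfx_drv.h. */
static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, u32 fifo_reg)
{
	return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
}

static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
				       u32 value)
{
	WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
}
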
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 9a9fe10d829b..45fbc41440f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -610,7 +610,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
/* Send a new fence in case one was removed */
if (send_fence) {
- vmw_fifo_send_fence(man->dev_priv, &dummy);
+ vmw_cmd_send_fence(man->dev_priv, &dummy);
wake_up_all(&man->idle_queue);
}
@@ -1208,18 +1208,14 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
*
* @man: The command buffer manager.
* @size: The size of the main space pool.
- * @default_size: The default size of the command buffer for small kernel
- * submissions.
*
- * Set the size and allocate the main command buffer space pool,
- * as well as the default size of the command buffer for
- * small kernel submissions. If successful, this enables large command
- * submissions. Note that this function requires that rudimentary command
+ * Set the size and allocate the main command buffer space pool.
+ * If successful, this enables large command submissions.
+ * Note that this function requires that rudimentary command
* submission is already available and that the MOB memory manager is alive.
* Returns 0 on success. Negative error code on failure.
*/
-int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
- size_t size, size_t default_size)
+int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
struct vmw_private *dev_priv = man->dev_priv;
bool dummy;
@@ -1230,7 +1226,7 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
/* First, try to allocate a huge chunk of DMA memory */
size = PAGE_ALIGN(size);
- man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
+ man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
&man->handle, GFP_KERNEL);
if (man->map) {
man->using_mob = false;
@@ -1313,7 +1309,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
2 : 1;
man->headers = dma_pool_create("vmwgfx cmdbuf",
- &dev_priv->dev->pdev->dev,
+ dev_priv->drm.dev,
sizeof(SVGACBHeader),
64, PAGE_SIZE);
if (!man->headers) {
@@ -1322,7 +1318,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
}
man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
- &dev_priv->dev->pdev->dev,
+ dev_priv->drm.dev,
sizeof(struct vmw_cmdbuf_dheader),
64, PAGE_SIZE);
if (!man->dheaders) {
@@ -1387,7 +1383,7 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
ttm_bo_put(man->cmd_space);
man->cmd_space = NULL;
} else {
- dma_free_coherent(&man->dev_priv->dev->pdev->dev,
+ dma_free_coherent(man->dev_priv->drm.dev,
man->size, man->map, man->handle);
}
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 61c246335e66..6f4d0da11ad8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -163,7 +163,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
}
vmw_execbuf_release_pinned_bo(dev_priv);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return;
@@ -171,7 +171,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_dec(dev_priv);
}
@@ -265,7 +265,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
return -ENOMEM;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
vmw_resource_unreference(&res);
return -ENOMEM;
@@ -275,7 +275,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
res->hw_destroy = vmw_hw_context_destroy;
return 0;
@@ -316,7 +316,7 @@ static int vmw_gb_context_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
ret = -ENOMEM;
goto out_no_fifo;
@@ -325,7 +325,7 @@ static int vmw_gb_context_create(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
return 0;
@@ -348,7 +348,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -358,7 +358,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
cmd->body.mobid = bo->mem.start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -392,7 +392,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
@@ -411,7 +411,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
cmd2->body.cid = res->id;
cmd2->body.mobid = SVGA3D_INVALID_ID;
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
mutex_unlock(&dev_priv->binding_mutex);
/*
@@ -440,14 +440,14 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
if (likely(res->id == -1))
return 0;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
if (dev_priv->query_cid == res->id)
dev_priv->query_cid_valid = false;
vmw_resource_release_id(res);
@@ -483,7 +483,7 @@ static int vmw_dx_context_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
ret = -ENOMEM;
goto out_no_fifo;
@@ -492,7 +492,7 @@ static int vmw_dx_context_create(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
return 0;
@@ -515,7 +515,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -525,7 +525,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
cmd->body.mobid = bo->mem.start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -608,7 +608,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
@@ -627,7 +627,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
cmd2->body.cid = res->id;
cmd2->body.mobid = SVGA3D_INVALID_ID;
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
mutex_unlock(&dev_priv->binding_mutex);
/*
@@ -656,14 +656,14 @@ static int vmw_dx_context_destroy(struct vmw_resource *res)
if (likely(res->id == -1))
return 0;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
if (dev_priv->query_cid == res->id)
dev_priv->query_cid_valid = false;
vmw_resource_release_id(res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 984d8884357d..ba658fa9cf6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -175,7 +175,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
dma_resv_assert_held(bo->base.resv);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (!cmd)
return -ENOMEM;
@@ -188,7 +188,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
cmd->body.mobid = bo->mem.start;
cmd->body.validSizeInBytes = vcotbl->size_read_back;
- vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
vcotbl->scrubbed = false;
return 0;
@@ -263,7 +263,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
if (readback)
submit_size += sizeof(*cmd0);
- cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
if (!cmd1)
return -ENOMEM;
@@ -283,7 +283,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
cmd1->body.type = vcotbl->type;
cmd1->body.mobid = SVGA3D_INVALID_ID;
cmd1->body.validSizeInBytes = 0;
- vmw_fifo_commit_flush(dev_priv, submit_size);
+ vmw_cmd_commit_flush(dev_priv, submit_size);
vcotbl->scrubbed = true;
/* Trigger a create() on next validate. */
@@ -349,7 +349,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
struct vmw_fence_obj *fence;
if (!vcotbl->scrubbed) {
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (!cmd)
return -ENOMEM;
@@ -358,7 +358,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
cmd->body.cid = vcotbl->ctx->id;
cmd->body.type = vcotbl->type;
vcotbl->size_read_back = res->backup_size;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
}
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
@@ -430,7 +430,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* Do a page by page copy of COTables. This eliminates slow vmap()s.
* This should really be a TTM utility.
*/
- for (i = 0; i < old_bo->num_pages; ++i) {
+ for (i = 0; i < old_bo->mem.num_pages; ++i) {
bool dummy;
ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 216daf93022c..408847a68cf3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -32,10 +32,10 @@
#include <linux/mem_encrypt.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>
#include "ttm_object.h"
@@ -43,8 +43,6 @@
#include "vmwgfx_drv.h"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
-#define VMWGFX_CHIP_SVGAII 0
-#define VMW_FB_RESERVATION 0
#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600
@@ -253,8 +251,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
};
static const struct pci_device_id vmw_pci_id_list[] = {
- {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
- {0, 0, 0}
+ { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
+ { }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
@@ -425,8 +423,7 @@ static int vmw_request_device_late(struct vmw_private *dev_priv)
}
if (dev_priv->cman) {
- ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
- 256*4096, 2*4096);
+ ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
if (ret) {
struct vmw_cmdbuf_man *man = dev_priv->cman;
@@ -609,7 +606,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
*/
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int ret = 0;
ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
@@ -644,26 +641,83 @@ static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
#endif
}
-static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+static int vmw_setup_pci_resources(struct vmw_private *dev,
+ unsigned long pci_id)
{
- struct vmw_private *dev_priv;
+ resource_size_t fifo_start;
+ resource_size_t fifo_size;
int ret;
+ struct pci_dev *pdev = to_pci_dev(dev->drm.dev);
+
+ pci_set_master(pdev);
+
+ ret = pci_request_regions(pdev, "vmwgfx probe");
+ if (ret)
+ return ret;
+
+ dev->io_start = pci_resource_start(pdev, 0);
+ dev->vram_start = pci_resource_start(pdev, 1);
+ dev->vram_size = pci_resource_len(pdev, 1);
+ fifo_start = pci_resource_start(pdev, 2);
+ fifo_size = pci_resource_len(pdev, 2);
+
+ DRM_INFO("FIFO at %pa size is %llu kiB\n",
+ &fifo_start, (uint64_t)fifo_size / 1024);
+ dev->fifo_mem = devm_memremap(dev->drm.dev,
+ fifo_start,
+ fifo_size,
+ MEMREMAP_WB);
+
+ if (unlikely(dev->fifo_mem == NULL)) {
+ DRM_ERROR("Failed mapping FIFO memory.\n");
+ return -ENOMEM;
+ }
+
+ /*
+	 * This is the approximate size of the vram; the exact size will only
+ * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource
+ * size will be equal to or bigger than the size reported by
+ * SVGA_REG_VRAM_SIZE.
+ */
+ DRM_INFO("VRAM at %pa size is %llu kiB\n",
+ &dev->vram_start, (uint64_t)dev->vram_size / 1024);
+
+ return 0;
+}
+
+static int vmw_detect_version(struct vmw_private *dev)
+{
uint32_t svga_id;
+
+ vmw_write(dev, SVGA_REG_ID, SVGA_ID_2);
+ svga_id = vmw_read(dev, SVGA_REG_ID);
+ if (svga_id != SVGA_ID_2) {
+ DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n",
+ svga_id, dev->vmw_chipset);
+ return -ENOSYS;
+ }
+ return 0;
+}
+
+static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
+{
+ int ret;
enum vmw_res_type i;
bool refuse_dma = false;
char host_log[100] = {0};
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (unlikely(!dev_priv)) {
- DRM_ERROR("Failed allocating a device private struct.\n");
- return -ENOMEM;
- }
+ dev_priv->vmw_chipset = pci_id;
+ dev_priv->last_read_seqno = (uint32_t) -100;
+ dev_priv->drm.dev_private = dev_priv;
- pci_set_master(dev->pdev);
+ ret = vmw_setup_pci_resources(dev_priv, pci_id);
+ if (ret)
+ return ret;
+ ret = vmw_detect_version(dev_priv);
+ if (ret)
+ return ret;
- dev_priv->dev = dev;
- dev_priv->vmw_chipset = chipset;
- dev_priv->last_read_seqno = (uint32_t) -100;
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
@@ -673,7 +727,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock);
spin_lock_init(&dev_priv->cap_lock);
- spin_lock_init(&dev_priv->svga_lock);
spin_lock_init(&dev_priv->cursor_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
@@ -688,21 +741,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->used_memory_size = 0;
- dev_priv->io_start = pci_resource_start(dev->pdev, 0);
- dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
- dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
-
dev_priv->assume_16bpp = !!vmw_assume_16bpp;
dev_priv->enable_fb = enable_fbdev;
- vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
- svga_id = vmw_read(dev_priv, SVGA_REG_ID);
- if (svga_id != SVGA_ID_2) {
- ret = -ENOSYS;
- DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
- goto out_err0;
- }
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
@@ -720,7 +762,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
- dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
+ dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
@@ -794,7 +836,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (unlikely(ret != 0))
goto out_err0;
- dma_set_max_seg_size(dev->dev, U32_MAX);
+ dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max GMR ids is %u\n",
@@ -804,21 +846,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
(unsigned)dev_priv->memory_size / 1024);
}
- DRM_INFO("Maximum display memory size is %u kiB\n",
- dev_priv->prim_bb_mem / 1024);
- DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
- dev_priv->vram_start, dev_priv->vram_size / 1024);
- DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
- dev_priv->mmio_start, dev_priv->mmio_size / 1024);
-
- dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
- dev_priv->mmio_size, MEMREMAP_WB);
-
- if (unlikely(dev_priv->mmio_virt == NULL)) {
- ret = -ENOMEM;
- DRM_ERROR("Failed mapping MMIO.\n");
- goto out_err0;
- }
+ DRM_INFO("Maximum display memory size is %llu kiB\n",
+ (uint64_t)dev_priv->prim_bb_mem / 1024);
/* Need mmio memory to check for fifo pitchlock cap. */
if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
@@ -826,7 +855,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
!vmw_fifo_have_pitchlock(dev_priv)) {
ret = -ENOSYS;
DRM_ERROR("Hardware has no pitchlock\n");
- goto out_err4;
+ goto out_err0;
}
dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
@@ -835,29 +864,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (unlikely(dev_priv->tdev == NULL)) {
DRM_ERROR("Unable to initialize TTM object management.\n");
ret = -ENOMEM;
- goto out_err4;
- }
-
- dev->dev_private = dev_priv;
-
- ret = pci_request_regions(dev->pdev, "vmwgfx probe");
- dev_priv->stealth = (ret != 0);
- if (dev_priv->stealth) {
- /**
- * Request at least the mmio PCI resource.
- */
-
- DRM_INFO("It appears like vesafb is loaded. "
- "Ignore above error if any.\n");
- ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
- goto out_no_device;
- }
+ goto out_err0;
}
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
- ret = vmw_irq_install(dev, dev->pdev->irq);
+ ret = vmw_irq_install(&dev_priv->drm, pdev->irq);
if (ret != 0) {
DRM_ERROR("Failed installing irq: %d\n", ret);
goto out_no_irq;
@@ -874,8 +885,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver,
- dev_priv->dev->dev,
- dev->anon_inode->i_mapping,
+ dev_priv->drm.dev,
+ dev_priv->drm.anon_inode->i_mapping,
&dev_priv->vma_manager,
dev_priv->map_mode == vmw_dma_alloc_coherent,
false);
@@ -955,7 +966,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (ret)
goto out_no_fifo;
- DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
+ DRM_INFO("Atomic: %s\n", (dev_priv->drm.driver->driver_features & DRIVER_ATOMIC)
? "yes." : "no.");
if (dev_priv->sm_type == VMW_SM_5)
DRM_INFO("SM5 support available.\n");
@@ -1000,16 +1011,10 @@ out_no_bdev:
vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- vmw_irq_uninstall(dev_priv->dev);
+ vmw_irq_uninstall(&dev_priv->drm);
out_no_irq:
- if (dev_priv->stealth)
- pci_release_region(dev->pdev, 2);
- else
- pci_release_regions(dev->pdev);
-out_no_device:
+ pci_release_regions(pdev);
ttm_object_device_release(&dev_priv->tdev);
-out_err4:
- memunmap(dev_priv->mmio_virt);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
@@ -1023,6 +1028,7 @@ out_err0:
static void vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
enum vmw_res_type i;
unregister_pm_notifier(&dev_priv->pm_nb);
@@ -1052,14 +1058,10 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- vmw_irq_uninstall(dev_priv->dev);
- if (dev_priv->stealth)
- pci_release_region(dev->pdev, 2);
- else
- pci_release_regions(dev->pdev);
+ vmw_irq_uninstall(&dev_priv->drm);
+ pci_release_regions(pdev);
ttm_object_device_release(&dev_priv->tdev);
- memunmap(dev_priv->mmio_virt);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
@@ -1190,12 +1192,10 @@ static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
- spin_lock(&dev_priv->svga_lock);
if (!ttm_resource_manager_used(man)) {
vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
ttm_resource_manager_set_used(man, true);
}
- spin_unlock(&dev_priv->svga_lock);
}
/**
@@ -1221,14 +1221,12 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
- spin_lock(&dev_priv->svga_lock);
if (ttm_resource_manager_used(man)) {
ttm_resource_manager_set_used(man, false);
vmw_write(dev_priv, SVGA_REG_ENABLE,
SVGA_REG_ENABLE_HIDE |
SVGA_REG_ENABLE_ENABLE);
}
- spin_unlock(&dev_priv->svga_lock);
}
/**
@@ -1253,19 +1251,16 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
* to be inconsistent with the device, causing modesetting problems.
*
*/
- vmw_kms_lost_device(dev_priv->dev);
+ vmw_kms_lost_device(&dev_priv->drm);
ttm_write_lock(&dev_priv->reservation_sem, false);
- spin_lock(&dev_priv->svga_lock);
if (ttm_resource_manager_used(man)) {
- ttm_resource_manager_set_used(man, false);
- spin_unlock(&dev_priv->svga_lock);
if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
DRM_ERROR("Failed evicting VRAM buffers.\n");
+ ttm_resource_manager_set_used(man, false);
vmw_write(dev_priv, SVGA_REG_ENABLE,
SVGA_REG_ENABLE_HIDE |
SVGA_REG_ENABLE_ENABLE);
- } else
- spin_unlock(&dev_priv->svga_lock);
+ }
ttm_write_unlock(&dev_priv->reservation_sem);
}
@@ -1275,8 +1270,6 @@ static void vmw_remove(struct pci_dev *pdev)
drm_dev_unregister(dev);
vmw_driver_unload(dev);
- drm_dev_put(dev);
- pci_disable_device(pdev);
}
static unsigned long
@@ -1377,7 +1370,7 @@ static int vmw_pm_freeze(struct device *kdev)
* No user-space processes should be running now.
*/
ttm_suspend_unlock(&dev_priv->reservation_sem);
- ret = vmw_kms_suspend(dev_priv->dev);
+ ret = vmw_kms_suspend(&dev_priv->drm);
if (ret) {
ttm_suspend_lock(&dev_priv->reservation_sem);
DRM_ERROR("Failed to freeze modesetting.\n");
@@ -1409,7 +1402,7 @@ static int vmw_pm_freeze(struct device *kdev)
vmw_fence_fifo_down(dev_priv->fman);
__vmw_svga_disable(dev_priv);
-
+
vmw_release_device_late(dev_priv);
return 0;
}
@@ -1438,7 +1431,7 @@ static int vmw_pm_restore(struct device *kdev)
dev_priv->suspend_locked = false;
ttm_suspend_unlock(&dev_priv->reservation_sem);
if (dev_priv->suspend_state)
- vmw_kms_resume(dev_priv->dev);
+ vmw_kms_resume(&dev_priv->drm);
if (dev_priv->enable_fb)
vmw_fb_on(dev_priv);
@@ -1507,39 +1500,36 @@ static struct pci_driver vmw_pci_driver = {
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct drm_device *dev;
+ struct vmw_private *vmw;
int ret;
- ret = pci_enable_device(pdev);
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb");
if (ret)
return ret;
- dev = drm_dev_alloc(&driver, &pdev->dev);
- if (IS_ERR(dev)) {
- ret = PTR_ERR(dev);
- goto err_pci_disable_device;
- }
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
- dev->pdev = pdev;
- pci_set_drvdata(pdev, dev);
+ vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
+ struct vmw_private, drm);
+ if (IS_ERR(vmw))
+ return PTR_ERR(vmw);
- ret = vmw_driver_load(dev, ent->driver_data);
- if (ret)
- goto err_drm_dev_put;
+ vmw->drm.pdev = pdev;
+ pci_set_drvdata(pdev, &vmw->drm);
- ret = drm_dev_register(dev, ent->driver_data);
+ ret = vmw_driver_load(vmw, ent->device);
if (ret)
- goto err_vmw_driver_unload;
+ return ret;
- return 0;
+ ret = drm_dev_register(&vmw->drm, 0);
+ if (ret) {
+ vmw_driver_unload(&vmw->drm);
+ return ret;
+ }
-err_vmw_driver_unload:
- vmw_driver_unload(dev);
-err_drm_dev_put:
- drm_dev_put(dev);
-err_pci_disable_device:
- pci_disable_device(pdev);
- return ret;
+ return 0;
}
static int __init vmwgfx_init(void)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index b45becbb00f8..5fa5bcd20cc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -39,7 +39,6 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>
-#include <drm/ttm/ttm_module.h>
#include "ttm_lock.h"
#include "ttm_object.h"
@@ -67,6 +66,8 @@
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
+#define VMWGFX_PCI_ID_SVGA2 0x0405
+
/*
* Perhaps we should have sysfs entries for these.
*/
@@ -275,13 +276,6 @@ struct vmw_surface {
struct list_head view_list;
};
-struct vmw_marker_queue {
- struct list_head head;
- u64 lag;
- u64 lag_time;
- spinlock_t lock;
-};
-
struct vmw_fifo_state {
unsigned long reserved_size;
u32 *dynamic_buffer;
@@ -291,7 +285,6 @@ struct vmw_fifo_state {
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
- struct vmw_marker_queue marker_queue;
bool dx;
};
@@ -490,19 +483,19 @@ enum vmw_sm_type {
};
struct vmw_private {
+ struct drm_device drm;
struct ttm_bo_device bdev;
struct vmw_fifo_state fifo;
- struct drm_device *dev;
struct drm_vma_offset_manager vma_manager;
- unsigned long vmw_chipset;
- unsigned int io_start;
- uint32_t vram_start;
- uint32_t vram_size;
- uint32_t prim_bb_mem;
- uint32_t mmio_start;
- uint32_t mmio_size;
+ u32 vmw_chipset;
+ resource_size_t io_start;
+ resource_size_t vram_start;
+ resource_size_t vram_size;
+ resource_size_t prim_bb_mem;
+ u32 *fifo_mem;
+ resource_size_t fifo_mem_size;
uint32_t fb_max_width;
uint32_t fb_max_height;
uint32_t texture_max_width;
@@ -511,7 +504,6 @@ struct vmw_private {
uint32_t stdu_max_height;
uint32_t initial_width;
uint32_t initial_height;
- u32 *mmio_virt;
uint32_t capabilities;
uint32_t capabilities2;
uint32_t max_gmr_ids;
@@ -591,13 +583,7 @@ struct vmw_private {
struct mutex cmdbuf_mutex;
struct mutex binding_mutex;
- /**
- * Operating mode.
- */
-
- bool stealth;
bool enable_fb;
- spinlock_t svga_lock;
/**
* PM management.
@@ -967,30 +953,29 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv,
extern void vmw_fifo_release(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
extern void *
-vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
-extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
-extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
-extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
- uint32_t *seqno);
+vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
+extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
+extern int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno);
+extern bool vmw_supports_3d(struct vmw_private *dev_priv);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
-extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
-extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
- uint32_t cid);
-extern int vmw_fifo_flush(struct vmw_private *dev_priv,
- bool interruptible);
+extern int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
+ uint32_t cid);
+extern int vmw_cmd_flush(struct vmw_private *dev_priv,
+ bool interruptible);
-#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id) \
+#define VMW_CMD_CTX_RESERVE(__priv, __bytes, __ctx_id) \
({ \
- vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id) ? : ({ \
+ vmw_cmd_ctx_reserve(__priv, __bytes, __ctx_id) ? : ({ \
DRM_ERROR("FIFO reserve failed at %s for %u bytes\n", \
__func__, (unsigned int) __bytes); \
NULL; \
}); \
})
-#define VMW_FIFO_RESERVE(__priv, __bytes) \
- VMW_FIFO_RESERVE_DX(__priv, __bytes, SVGA3D_INVALID_ID)
+#define VMW_CMD_RESERVE(__priv, __bytes) \
+ VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
/**
* TTM glue - vmwgfx_ttm_glue.c
@@ -1125,19 +1110,6 @@ extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count);
-/**
- * Rudimentary fence-like objects currently used only for throttling -
- * vmwgfx_marker.c
- */
-
-extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
-extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
-extern int vmw_marker_push(struct vmw_marker_queue *queue,
- uint32_t seqno);
-extern int vmw_marker_pull(struct vmw_marker_queue *queue,
- uint32_t signaled_seqno);
-extern int vmw_wait_lag(struct vmw_private *dev_priv,
- struct vmw_marker_queue *queue, uint32_t us);
/**
* Kernel framebuffer - vmwgfx_fb.c
@@ -1411,8 +1383,7 @@ struct vmw_cmdbuf_header;
extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
-extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
- size_t size, size_t default_size);
+extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
@@ -1581,28 +1552,29 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
}
/**
- * vmw_mmio_read - Perform a MMIO read from volatile memory
+ * vmw_fifo_mem_read - Perform a MMIO read from the fifo memory
*
- * @addr: The address to read from
+ * @fifo_reg: The fifo register to read from
*
* This function is intended to be equivalent to ioread32() on
* memremap'd memory, but without byteswapping.
*/
-static inline u32 vmw_mmio_read(u32 *addr)
+static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
{
- return READ_ONCE(*addr);
+ return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
}
/**
- * vmw_mmio_write - Perform a MMIO write to volatile memory
+ * vmw_fifo_mem_write - Perform a MMIO write to the fifo memory
*
- * @addr: The address to write to
+ * @fifo_reg: The fifo register to write to
*
* This function is intended to be equivalent to iowrite32 on
* memremap'd memory, but without byteswapping.
*/
-static inline void vmw_mmio_write(u32 value, u32 *addr)
+static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
+ u32 value)
{
- WRITE_ONCE(*addr, value);
+ WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
}
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index e67e2e8f6e6f..462f17320708 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -724,7 +724,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
if (!dx_query_mob || dx_query_mob->dx_query_ctx)
return 0;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
if (cmd == NULL)
return -ENOMEM;
@@ -732,7 +732,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = ctx_res->id;
cmd->body.mobid = dx_query_mob->base.mem.start;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_context_bind_dx_query(ctx_res, dx_query_mob);
@@ -1042,7 +1042,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
- if (unlikely(new_query_bo->base.num_pages > 4)) {
+ if (unlikely(new_query_bo->base.mem.num_pages > 4)) {
VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL;
}
@@ -1100,7 +1100,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
BUG_ON(!ctx_entry->valid);
ctx = ctx_entry->res;
- ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
+ ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);
if (unlikely(ret != 0))
VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
@@ -1541,7 +1541,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
return ret;
/* Make sure DMA doesn't cross BO boundaries. */
- bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+ bo_size = vmw_bo->base.base.size;
if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
VMW_DEBUG_USER("Invalid DMA offset.\n");
return -EINVAL;
@@ -3762,7 +3762,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
/* p_handle implies file_priv. */
BUG_ON(p_handle != NULL && file_priv == NULL);
- ret = vmw_fifo_send_fence(dev_priv, &sequence);
+ ret = vmw_cmd_send_fence(dev_priv, &sequence);
if (unlikely(ret != 0)) {
VMW_DEBUG_USER("Fence submission error. Syncing.\n");
synced = true;
@@ -3876,10 +3876,10 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
void *cmd;
if (sw_context->dx_ctx_node)
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
sw_context->dx_ctx_node->ctx->id);
else
- cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, command_size);
if (!cmd)
return -ENOMEM;
@@ -3888,7 +3888,7 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
memcpy(cmd, kernel_commands, command_size);
vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
vmw_resource_relocations_free(&sw_context->res_relocations);
- vmw_fifo_commit(dev_priv, command_size);
+ vmw_cmd_commit(dev_priv, command_size);
return 0;
}
@@ -4046,11 +4046,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
}
if (throttle_us) {
- ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
- throttle_us);
-
- if (ret)
- goto out_free_fence_fd;
+ VMW_DEBUG_USER("Throttling is no longer supported.\n");
}
kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
@@ -4329,7 +4325,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
if (dev_priv->query_cid_valid) {
BUG_ON(fence != NULL);
- ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+ ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
if (ret)
goto out_no_emit;
dev_priv->query_cid_valid = false;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 4d60201037d1..33f07abfc3ae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -258,7 +258,7 @@ out_unreserve:
if (w && h) {
WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
&clip, 1));
- vmw_fifo_flush(vmw_priv, false);
+ vmw_cmd_flush(vmw_priv, false);
}
out_unlock:
mutex_unlock(&par->bo_mutex);
@@ -481,7 +481,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
DRM_ERROR("Could not unset a mode.\n");
return ret;
}
- drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
+ drm_mode_destroy(&par->vmw_priv->drm, par->set_mode);
par->set_mode = NULL;
}
@@ -567,7 +567,7 @@ static int vmw_fb_set_par(struct fb_info *info)
struct drm_display_mode *mode;
int ret;
- mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
+ mode = drm_mode_duplicate(&vmw_priv->drm, &new_mode);
if (!mode) {
DRM_ERROR("Could not create new fb mode.\n");
return -ENOMEM;
@@ -581,7 +581,7 @@ static int vmw_fb_set_par(struct fb_info *info)
mode->hdisplay *
DIV_ROUND_UP(var->bits_per_pixel, 8),
mode->vdisplay)) {
- drm_mode_destroy(vmw_priv->dev, mode);
+ drm_mode_destroy(&vmw_priv->drm, mode);
return -EINVAL;
}
@@ -615,7 +615,7 @@ static int vmw_fb_set_par(struct fb_info *info)
out_unlock:
if (par->set_mode)
- drm_mode_destroy(vmw_priv->dev, par->set_mode);
+ drm_mode_destroy(&vmw_priv->drm, par->set_mode);
par->set_mode = mode;
mutex_unlock(&par->bo_mutex);
@@ -638,7 +638,7 @@ static const struct fb_ops vmw_fb_ops = {
int vmw_fb_init(struct vmw_private *vmw_priv)
{
- struct device *device = &vmw_priv->dev->pdev->dev;
+ struct device *device = vmw_priv->drm.dev;
struct vmw_fb_par *par;
struct fb_info *info;
unsigned fb_width, fb_height;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 0f8d29397157..378ec7600154 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -141,8 +141,7 @@ static bool vmw_fence_enable_signaling(struct dma_fence *f)
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;
- u32 *fifo_mem = dev_priv->mmio_virt;
- u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+ u32 seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;
@@ -401,14 +400,12 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
u32 passed_seqno)
{
u32 goal_seqno;
- u32 *fifo_mem;
struct vmw_fence_obj *fence;
if (likely(!fman->seqno_valid))
return false;
- fifo_mem = fman->dev_priv->mmio_virt;
- goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
return false;
@@ -416,8 +413,9 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
list_for_each_entry(fence, &fman->fence_list, head) {
if (!list_empty(&fence->seq_passed_actions)) {
fman->seqno_valid = true;
- vmw_mmio_write(fence->base.seqno,
- fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ vmw_fifo_mem_write(fman->dev_priv,
+ SVGA_FIFO_FENCE_GOAL,
+ fence->base.seqno);
break;
}
}
@@ -445,18 +443,17 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
u32 goal_seqno;
- u32 *fifo_mem;
if (dma_fence_is_signaled_locked(&fence->base))
return false;
- fifo_mem = fman->dev_priv->mmio_virt;
- goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
if (likely(fman->seqno_valid &&
goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
return false;
- vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
+ fence->base.seqno);
fman->seqno_valid = true;
return true;
@@ -468,9 +465,8 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
struct list_head action_list;
bool needs_rerun;
uint32_t seqno, new_seqno;
- u32 *fifo_mem = fman->dev_priv->mmio_virt;
- seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+ seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
@@ -492,7 +488,7 @@ rerun:
needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
if (unlikely(needs_rerun)) {
- new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+ new_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
if (new_seqno != seqno) {
seqno = new_seqno;
goto rerun;
@@ -1033,7 +1029,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
eaction->action.type = VMW_ACTION_EVENT;
eaction->fence = vmw_fence_obj_reference(fence);
- eaction->dev = fman->dev_priv->dev;
+ eaction->dev = &fman->dev_priv->drm;
eaction->tv_sec = tv_sec;
eaction->tv_usec = tv_usec;
@@ -1055,7 +1051,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
{
struct vmw_event_fence_pending *event;
struct vmw_fence_manager *fman = fman_from_fence(fence);
- struct drm_device *dev = fman->dev_priv->dev;
+ struct drm_device *dev = &fman->dev_priv->drm;
int ret;
event = kzalloc(sizeof(*event), GFP_KERNEL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 83c0d5a3e4fd..964ddf1ca57a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -51,7 +51,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
uint32_t cmd_size = define_size + remap_size;
uint32_t i;
- cmd_orig = cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
+ cmd_orig = cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -98,7 +98,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
- vmw_fifo_commit(dev_priv, cmd_size);
+ vmw_cmd_commit(dev_priv, cmd_size);
return 0;
}
@@ -110,7 +110,7 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
uint32_t define_size = sizeof(define_cmd) + 4;
uint32_t *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, define_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, define_size);
if (unlikely(cmd == NULL))
return;
@@ -120,7 +120,7 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
*cmd++ = SVGA_CMD_DEFINE_GMR2;
memcpy(cmd, &define_cmd, sizeof(define_cmd));
- vmw_fifo_commit(dev_priv, define_size);
+ vmw_cmd_commit(dev_priv, define_size);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index be325a62c178..1774960d1b89 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -29,7 +29,6 @@
*/
#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
@@ -65,20 +64,19 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
spin_lock(&gman->lock);
if (gman->max_gmr_pages > 0) {
- gman->used_gmr_pages += bo->num_pages;
+ gman->used_gmr_pages += mem->num_pages;
if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
goto nospace;
}
mem->mm_node = gman;
mem->start = id;
- mem->num_pages = bo->num_pages;
spin_unlock(&gman->lock);
return 0;
nospace:
- gman->used_gmr_pages -= bo->num_pages;
+ gman->used_gmr_pages -= mem->num_pages;
spin_unlock(&gman->lock);
ida_free(&gman->gmr_ida, id);
return -ENOSPC;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index f681b7b4df1b..80af8772b8c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -51,7 +51,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
param->value = vmw_overlay_num_free_overlays(dev_priv);
break;
case DRM_VMW_PARAM_3D:
- param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
+ param->value = vmw_supports_3d(dev_priv) ? 1 : 0;
break;
case DRM_VMW_PARAM_HW_CAPS:
param->value = dev_priv->capabilities;
@@ -67,7 +67,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
break;
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
- u32 *fifo_mem = dev_priv->mmio_virt;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -76,11 +75,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
}
param->value =
- vmw_mmio_read(fifo_mem +
- ((fifo->capabilities &
- SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
- SVGA_FIFO_3D_HWVERSION_REVISED :
- SVGA_FIFO_3D_HWVERSION));
+ vmw_fifo_mem_read(dev_priv,
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
break;
}
case DRM_VMW_PARAM_MAX_SURF_MEMORY:
@@ -235,7 +234,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_err;
} else {
- fifo_mem = dev_priv->mmio_virt;
+ fifo_mem = dev_priv->fifo_mem;
memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 75f3efee21a4..6c2a569f1fcb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -117,12 +117,10 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
void vmw_update_seqno(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
- uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+ uint32_t seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
if (dev_priv->last_read_seqno != seqno) {
dev_priv->last_read_seqno = seqno;
- vmw_marker_pull(&fifo_state->marker_queue, seqno);
vmw_fences_update(dev_priv->fman);
}
}
@@ -222,11 +220,9 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
}
}
finish_wait(&dev_priv->fence_queue, &__wait);
- if (ret == 0 && fifo_idle) {
- u32 *fifo_mem = dev_priv->mmio_virt;
+ if (ret == 0 && fifo_idle)
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, signal_seq);
- vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
- }
wake_up_all(&dev_priv->fence_queue);
out_err:
if (fifo_idle)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bc67f2b930e1..9a89f658e501 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -36,9 +36,6 @@
#include "vmwgfx_kms.h"
-/* Might need a hrtimer here? */
-#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
-
void vmw_du_cleanup(struct vmw_display_unit *du)
{
drm_plane_cleanup(&du->primary);
@@ -68,7 +65,7 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
if (!image)
return -EINVAL;
- cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -83,7 +80,7 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
cmd->cursor.hotspotX = hotspotX;
cmd->cursor.hotspotY = hotspotY;
- vmw_fifo_commit_flush(dev_priv, cmd_size);
+ vmw_cmd_commit_flush(dev_priv, cmd_size);
return 0;
}
@@ -128,15 +125,14 @@ err_unreserve:
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t count;
spin_lock(&dev_priv->cursor_lock);
- vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
- vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
- vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
- count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
- vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, show ? 1 : 0);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
+ count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
spin_unlock(&dev_priv->cursor_lock);
}
@@ -236,7 +232,7 @@ err_unreserve:
*/
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct vmw_display_unit *du;
struct drm_crtc *crtc;
@@ -252,7 +248,7 @@ void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct vmw_display_unit *du;
struct drm_crtc *crtc;
@@ -891,7 +887,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
bool is_bo_proxy)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
int ret;
@@ -1003,11 +999,11 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
struct drm_clip_rect norect;
int ret, increment = 1;
- drm_modeset_lock_all(dev_priv->dev);
+ drm_modeset_lock_all(&dev_priv->drm);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) {
- drm_modeset_unlock_all(dev_priv->dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
return ret;
}
@@ -1033,10 +1029,10 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
break;
}
- vmw_fifo_flush(dev_priv, false);
+ vmw_cmd_flush(dev_priv, false);
ttm_read_unlock(&dev_priv->reservation_sem);
- drm_modeset_unlock_all(dev_priv->dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
return ret;
}
@@ -1213,14 +1209,14 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
*mode_cmd)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct vmw_framebuffer_bo *vfbd;
unsigned int requested_size;
struct drm_format_name_buf format_name;
int ret;
requested_size = mode_cmd->height * mode_cmd->pitches[0];
- if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
+ if (unlikely(requested_size > bo->base.base.size)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
return -EINVAL;
@@ -1319,7 +1315,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
bo && only_2d &&
mode_cmd->width > 64 && /* Don't create a proxy for cursor */
dev_priv->active_display_unit == vmw_du_screen_target) {
- ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
+ ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
bo, &surface);
if (ret)
return ERR_PTR(ret);
@@ -1768,7 +1764,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
if (ret)
return ret;
- vmw_fifo_flush(dev_priv, false);
+ vmw_cmd_flush(dev_priv, false);
return 0;
}
@@ -1780,7 +1776,7 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
return;
dev_priv->hotplug_mode_update_property =
- drm_property_create_range(dev_priv->dev,
+ drm_property_create_range(&dev_priv->drm,
DRM_MODE_PROP_IMMUTABLE,
"hotplug_mode_update", 0, 1);
@@ -1791,7 +1787,7 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
int vmw_kms_init(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int ret;
drm_mode_config_init(dev);
@@ -1823,7 +1819,7 @@ int vmw_kms_close(struct vmw_private *dev_priv)
* but since it destroys encoders and our destructor calls
* drm_encoder_cleanup which takes the lock we deadlock.
*/
- drm_mode_config_cleanup(dev_priv->dev);
+ drm_mode_config_cleanup(&dev_priv->drm);
if (dev_priv->active_display_unit == vmw_du_legacy)
ret = vmw_kms_ldu_close_display(dev_priv);
@@ -1876,11 +1872,11 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
else if (vmw_fifo_have_pitchlock(vmw_priv))
- vmw_mmio_write(pitch, vmw_priv->mmio_virt +
- SVGA_FIFO_PITCHLOCK);
+ vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
+ if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
@@ -1934,7 +1930,7 @@ void vmw_disable_vblank(struct drm_crtc *crtc)
static int vmw_du_update_layout(struct vmw_private *dev_priv,
unsigned int num_rects, struct drm_rect *rects)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct vmw_display_unit *du;
struct drm_connector *con;
struct drm_connector_list_iter conn_iter;
@@ -2366,7 +2362,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
if (dirty->crtc) {
units[num_units++] = vmw_crtc_to_du(dirty->crtc);
} else {
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
+ list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
head) {
struct drm_plane *plane = crtc->primary;
@@ -2386,7 +2382,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
dirty->unit = unit;
if (dirty->fifo_reserve_size > 0) {
- dirty->cmd = VMW_FIFO_RESERVE(dev_priv,
+ dirty->cmd = VMW_CMD_RESERVE(dev_priv,
dirty->fifo_reserve_size);
if (!dirty->cmd)
return -ENOMEM;
@@ -2520,7 +2516,7 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
if (!clips)
return 0;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
if (!cmd)
return -ENOMEM;
@@ -2549,7 +2545,7 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
copy_size += sizeof(*cmd);
}
- vmw_fifo_commit(dev_priv, copy_size);
+ vmw_cmd_commit(dev_priv, copy_size);
return 0;
}
@@ -2568,8 +2564,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
int i = 0;
int ret = 0;
- mutex_lock(&dev_priv->dev->mode_config.mutex);
- list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
+ mutex_lock(&dev_priv->drm.mode_config.mutex);
+ list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list,
head) {
if (i == unit)
break;
@@ -2577,7 +2573,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
++i;
}
- if (&con->head == &dev_priv->dev->mode_config.connector_list) {
+ if (&con->head == &dev_priv->drm.mode_config.connector_list) {
DRM_ERROR("Could not find initial display unit.\n");
ret = -EINVAL;
goto out_unlock;
@@ -2611,7 +2607,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
}
out_unlock:
- mutex_unlock(&dev_priv->dev->mode_config.mutex);
+ mutex_unlock(&dev_priv->drm.mode_config.mutex);
return ret;
}
@@ -2631,7 +2627,7 @@ vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
return;
dev_priv->implicit_placement_property =
- drm_property_create_range(dev_priv->dev,
+ drm_property_create_range(&dev_priv->drm,
DRM_MODE_PROP_IMMUTABLE,
"implicit_placement", 0, 1);
}
@@ -2752,7 +2748,7 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
goto out_unref;
reserved_size = update->calc_fifo_size(update, num_hits);
- cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size);
+ cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
if (!cmd_start) {
ret = -ENOMEM;
goto out_revert;
@@ -2801,7 +2797,7 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
if (reserved_size < submit_size)
submit_size = 0;
- vmw_fifo_commit(update->dev_priv, submit_size);
+ vmw_cmd_commit(update->dev_priv, submit_size);
vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
update->out_fence, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 9d1de5b5cc6a..9a9508edbc9e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -125,7 +125,6 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
i++;
}
@@ -355,7 +354,7 @@ static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = {
static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
{
struct vmw_legacy_display_unit *ldu;
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_plane *primary, *cursor;
@@ -479,7 +478,7 @@ err_free:
int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int i, ret;
if (dev_priv->ldu_priv) {
@@ -554,7 +553,7 @@ int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
} *cmd;
fifo_size = sizeof(*cmd) * num_clips;
- cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, fifo_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -567,6 +566,6 @@ int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
cmd[i].body.height = clips->y2 - clips->y1;
}
- vmw_fifo_commit(dev_priv, fifo_size);
+ vmw_cmd_commit(dev_priv, fifo_size);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
deleted file mode 100644
index e53bc639a754..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/**************************************************************************
- *
- * Copyright 2010 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-#include "vmwgfx_drv.h"
-
-struct vmw_marker {
- struct list_head head;
- uint32_t seqno;
- u64 submitted;
-};
-
-void vmw_marker_queue_init(struct vmw_marker_queue *queue)
-{
- INIT_LIST_HEAD(&queue->head);
- queue->lag = 0;
- queue->lag_time = ktime_get_raw_ns();
- spin_lock_init(&queue->lock);
-}
-
-void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
-{
- struct vmw_marker *marker, *next;
-
- spin_lock(&queue->lock);
- list_for_each_entry_safe(marker, next, &queue->head, head) {
- kfree(marker);
- }
- spin_unlock(&queue->lock);
-}
-
-int vmw_marker_push(struct vmw_marker_queue *queue,
- uint32_t seqno)
-{
- struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);
-
- if (unlikely(!marker))
- return -ENOMEM;
-
- marker->seqno = seqno;
- marker->submitted = ktime_get_raw_ns();
- spin_lock(&queue->lock);
- list_add_tail(&marker->head, &queue->head);
- spin_unlock(&queue->lock);
-
- return 0;
-}
-
-int vmw_marker_pull(struct vmw_marker_queue *queue,
- uint32_t signaled_seqno)
-{
- struct vmw_marker *marker, *next;
- bool updated = false;
- u64 now;
-
- spin_lock(&queue->lock);
- now = ktime_get_raw_ns();
-
- if (list_empty(&queue->head)) {
- queue->lag = 0;
- queue->lag_time = now;
- updated = true;
- goto out_unlock;
- }
-
- list_for_each_entry_safe(marker, next, &queue->head, head) {
- if (signaled_seqno - marker->seqno > (1 << 30))
- continue;
-
- queue->lag = now - marker->submitted;
- queue->lag_time = now;
- updated = true;
- list_del(&marker->head);
- kfree(marker);
- }
-
-out_unlock:
- spin_unlock(&queue->lock);
-
- return (updated) ? 0 : -EBUSY;
-}
-
-static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
-{
- u64 now;
-
- spin_lock(&queue->lock);
- now = ktime_get_raw_ns();
- queue->lag += now - queue->lag_time;
- queue->lag_time = now;
- spin_unlock(&queue->lock);
- return queue->lag;
-}
-
-
-static bool vmw_lag_lt(struct vmw_marker_queue *queue,
- uint32_t us)
-{
- u64 cond = (u64) us * NSEC_PER_USEC;
-
- return vmw_fifo_lag(queue) <= cond;
-}
-
-int vmw_wait_lag(struct vmw_private *dev_priv,
- struct vmw_marker_queue *queue, uint32_t us)
-{
- struct vmw_marker *marker;
- uint32_t seqno;
- int ret;
-
- while (!vmw_lag_lt(queue, us)) {
- spin_lock(&queue->lock);
- if (list_empty(&queue->head))
- seqno = atomic_read(&dev_priv->marker_seq);
- else {
- marker = list_first_entry(&queue->head,
- struct vmw_marker, head);
- seqno = marker->seqno;
- }
- spin_unlock(&queue->lock);
-
- ret = vmw_wait_seqno(dev_priv, false, seqno, true,
- 3*HZ);
-
- if (unlikely(ret != 0))
- return ret;
-
- (void) vmw_marker_pull(queue, seqno);
- }
- return 0;
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 7f95ed6aa224..a372980fe6a5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -148,7 +148,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
ret = -ENOMEM;
goto out_no_fifo;
@@ -170,7 +170,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
*/
BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
otable->page_table = mob;
return 0;
@@ -203,7 +203,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
return;
bo = otable->page_table->pt_bo;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return;
@@ -215,7 +215,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
cmd->body.sizeInBytes = 0;
cmd->body.validSizeInBytes = 0;
cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
if (bo) {
int ret;
@@ -558,12 +558,12 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
BUG_ON(ret != 0);
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (cmd) {
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
cmd->header.size = sizeof(cmd->body);
cmd->body.mobid = mob->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
}
if (bo) {
@@ -625,7 +625,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
vmw_fifo_resource_inc(dev_priv);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
goto out_no_cmd_space;
@@ -636,7 +636,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index cd7ed1650d60..d6d282c13b7f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -122,7 +122,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
- cmds = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ cmds = VMW_CMD_RESERVE(dev_priv, fifo_size);
/* hardware has hung, can't do anything here */
if (!cmds)
return -ENOMEM;
@@ -169,7 +169,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
fill_flush(flush, arg->stream_id);
- vmw_fifo_commit(dev_priv, fifo_size);
+ vmw_cmd_commit(dev_priv, fifo_size);
return 0;
}
@@ -192,7 +192,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
int ret;
for (;;) {
- cmds = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmds));
+ cmds = VMW_CMD_RESERVE(dev_priv, sizeof(*cmds));
if (cmds)
break;
@@ -211,7 +211,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
cmds->body.items[0].value = false;
fill_flush(&cmds->flush, stream_id);
- vmw_fifo_commit(dev_priv, sizeof(*cmds));
+ vmw_cmd_commit(dev_priv, sizeof(*cmds));
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 0b76b3d17d4c..0a900afc66ff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -232,7 +232,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- pgoff_t num_pages = vbo->base.num_pages;
+ pgoff_t num_pages = vbo->base.mem.num_pages;
size_t size, acc_size;
int ret;
static struct ttm_operation_ctx ctx = {
@@ -413,7 +413,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
return ret;
page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->num_pages)) {
+ if (unlikely(page_offset >= bo->mem.num_pages)) {
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
@@ -456,7 +456,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (page_offset >= bo->num_pages ||
+ if (page_offset >= bo->mem.num_pages ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {
@@ -531,7 +531,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (page_offset >= bo->num_pages ||
+ if (page_offset >= bo->mem.num_pages ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 00b535831a7a..d1e7b9608145 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -360,7 +360,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
int ret;
if (likely(res->backup)) {
- BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+ BUG_ON(res->backup->base.base.size < size);
return 0;
}
@@ -827,7 +827,7 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
dx_query_ctx = dx_query_mob->dx_query_ctx;
dev_priv = dx_query_ctx->dev_priv;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -835,7 +835,7 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = dx_query_ctx->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
/* Triggers a rebind the next time affected context is bound */
dx_query_mob->dx_query_ctx = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 4bdad2f2d130..b0db059b8cfb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -132,7 +132,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
BUG_ON(!sou->buffer);
fifo_size = sizeof(*cmd);
- cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, fifo_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -153,7 +153,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
cmd->obj.backingStore.pitch = mode->hdisplay * 4;
- vmw_fifo_commit(dev_priv, fifo_size);
+ vmw_cmd_commit(dev_priv, fifo_size);
sou->defined = true;
@@ -181,7 +181,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
return 0;
fifo_size = sizeof(*cmd);
- cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, fifo_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -189,7 +189,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
cmd->body.screenId = sou->base.unit;
- vmw_fifo_commit(dev_priv, fifo_size);
+ vmw_cmd_commit(dev_priv, fifo_size);
/* Force sync */
ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
@@ -829,7 +829,7 @@ static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
struct vmw_screen_object_unit *sou;
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_plane *primary, *cursor;
@@ -946,7 +946,7 @@ err_free:
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int i, ret;
if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
@@ -992,7 +992,7 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
if (depth == 32)
depth = 24;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (!cmd)
return -ENOMEM;
@@ -1003,7 +1003,7 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
cmd->body.bytesPerLine = framebuffer->base.pitches[0];
/* Buffer is reserved in vram or GMR */
vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -1029,7 +1029,7 @@ static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
int i;
if (!dirty->num_hits) {
- vmw_fifo_commit(dirty->dev_priv, 0);
+ vmw_cmd_commit(dirty->dev_priv, 0);
return;
}
@@ -1061,7 +1061,7 @@ static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
blit->bottom -= sdirty->top;
}
- vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));
+ vmw_cmd_commit(dirty->dev_priv, region_size + sizeof(*cmd));
sdirty->left = sdirty->top = S32_MAX;
sdirty->right = sdirty->bottom = S32_MIN;
@@ -1185,11 +1185,11 @@ out_unref:
static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty)
{
if (!dirty->num_hits) {
- vmw_fifo_commit(dirty->dev_priv, 0);
+ vmw_cmd_commit(dirty->dev_priv, 0);
return;
}
- vmw_fifo_commit(dirty->dev_priv,
+ vmw_cmd_commit(dirty->dev_priv,
sizeof(struct vmw_kms_sou_bo_blit) *
dirty->num_hits);
}
@@ -1295,11 +1295,11 @@ out_unref:
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
if (!dirty->num_hits) {
- vmw_fifo_commit(dirty->dev_priv, 0);
+ vmw_cmd_commit(dirty->dev_priv, 0);
return;
}
- vmw_fifo_commit(dirty->dev_priv,
+ vmw_cmd_commit(dirty->dev_priv,
sizeof(struct vmw_kms_sou_readback_blit) *
dirty->num_hits);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index f328aa5839a2..905ae50aaa2a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -222,7 +222,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
ret = -ENOMEM;
goto out_no_fifo;
@@ -233,7 +233,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
cmd->body.shid = res->id;
cmd->body.type = shader->type;
cmd->body.sizeInBytes = shader->size;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
return 0;
@@ -256,7 +256,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -266,7 +266,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
cmd->body.mobid = bo->mem.start;
cmd->body.offsetInBytes = res->backup_offset;
res->backup_dirty = false;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -284,7 +284,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -293,7 +293,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
cmd->body.shid = res->id;
cmd->body.mobid = SVGA3D_INVALID_ID;
cmd->body.offsetInBytes = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
/*
* Create a fence object and fence the backup buffer.
@@ -324,7 +324,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
mutex_lock(&dev_priv->binding_mutex);
vmw_binding_res_list_scrub(&res->binding_head);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
@@ -333,7 +333,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
mutex_unlock(&dev_priv->binding_mutex);
vmw_resource_release_id(res);
vmw_fifo_resource_dec(dev_priv);
@@ -394,7 +394,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
if (!list_empty(&shader->cotable_head) || !shader->committed)
return 0;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), shader->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), shader->ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -404,7 +404,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
cmd->body.shid = shader->id;
cmd->body.mobid = res->backup->base.mem.start;
cmd->body.offsetInBytes = res->backup_offset;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
@@ -481,7 +481,7 @@ static int vmw_dx_shader_scrub(struct vmw_resource *res)
return 0;
WARN_ON_ONCE(!shader->committed);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -491,7 +491,7 @@ static int vmw_dx_shader_scrub(struct vmw_resource *res)
cmd->body.shid = res->id;
cmd->body.mobid = SVGA3D_INVALID_ID;
cmd->body.offsetInBytes = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
res->id = -1;
list_del_init(&shader->cotable_head);
@@ -856,8 +856,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
return ret;
}
- if ((u64)buffer->base.num_pages * PAGE_SIZE <
- (u64)size + (u64)offset) {
+ if ((u64)buffer->base.base.size < (u64)size + (u64)offset) {
VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
ret = -EINVAL;
goto out_bad_arg;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 3f97b61dd5d8..7369dd86d3a9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -170,7 +170,7 @@ static int vmw_view_create(struct vmw_resource *res)
return 0;
}
- cmd = VMW_FIFO_RESERVE_DX(res->dev_priv, view->cmd_size, view->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(res->dev_priv, view->cmd_size, view->ctx->id);
if (!cmd) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
@@ -181,7 +181,7 @@ static int vmw_view_create(struct vmw_resource *res)
/* Sid may have changed due to surface eviction. */
WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
cmd->body.sid = view->srf->id;
- vmw_fifo_commit(res->dev_priv, view->cmd_size);
+ vmw_cmd_commit(res->dev_priv, view->cmd_size);
res->id = view->view_id;
list_add_tail(&view->srf_head, &srf->view_list);
vmw_cotable_add_resource(view->cotable, &view->cotable_head);
@@ -213,14 +213,14 @@ static int vmw_view_destroy(struct vmw_resource *res)
if (!view->committed || res->id == -1)
return 0;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), view->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), view->ctx->id);
if (!cmd)
return -ENOMEM;
cmd->header.id = vmw_view_destroy_cmds[view->view_type];
cmd->header.size = sizeof(cmd->body);
cmd->body.view_id = view->view_id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
res->id = -1;
list_del_init(&view->cotable_head);
list_del_init(&view->srf_head);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 5b04ec047ef3..fbe977881364 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -170,7 +170,7 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
SVGA3dCmdDefineGBScreenTarget body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -188,7 +188,7 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
stdu->base.set_gui_x = cmd->body.xRoot;
stdu->base.set_gui_y = cmd->body.yRoot;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
stdu->defined = true;
stdu->display_width = mode->hdisplay;
@@ -229,7 +229,7 @@ static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
memset(&image, 0, sizeof(image));
image.sid = res ? res->id : SVGA3D_INVALID_ID;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -239,7 +239,7 @@ static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
cmd->body.stid = stdu->base.unit;
cmd->body.image = image;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -293,7 +293,7 @@ static int vmw_stdu_update_st(struct vmw_private *dev_priv,
return -EINVAL;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -301,7 +301,7 @@ static int vmw_stdu_update_st(struct vmw_private *dev_priv,
0, stdu->display_width,
0, stdu->display_height);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -329,7 +329,7 @@ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
if (unlikely(!stdu->defined))
return 0;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -338,7 +338,7 @@ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
cmd->body.stid = stdu->base.unit;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
/* Force sync */
ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
@@ -499,7 +499,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
if (!dirty->num_hits) {
- vmw_fifo_commit(dirty->dev_priv, 0);
+ vmw_cmd_commit(dirty->dev_priv, 0);
return;
}
@@ -512,7 +512,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
cmd->body.host.mipmap = 0;
cmd->body.transfer = ddirty->transfer;
suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
+ suffix->maximumOffset = ddirty->buf->base.base.size;
if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
blit_size += sizeof(struct vmw_stdu_update);
@@ -522,7 +522,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
ddirty->top, ddirty->bottom);
}
- vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
+ vmw_cmd_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
stdu->display_srf->res.res_dirty = true;
ddirty->left = ddirty->top = S32_MAX;
@@ -628,7 +628,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
dev_priv = vmw_priv(stdu->base.crtc.dev);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (!cmd)
goto out_cleanup;
@@ -636,7 +636,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
region.x1, region.x2,
region.y1, region.y2);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
}
out_cleanup:
@@ -795,7 +795,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
size_t commit_size;
if (!dirty->num_hits) {
- vmw_fifo_commit(dirty->dev_priv, 0);
+ vmw_cmd_commit(dirty->dev_priv, 0);
return;
}
@@ -817,7 +817,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left,
sdirty->right, sdirty->top, sdirty->bottom);
- vmw_fifo_commit(dirty->dev_priv, commit_size);
+ vmw_cmd_commit(dirty->dev_priv, commit_size);
sdirty->left = sdirty->top = S32_MAX;
sdirty->right = sdirty->bottom = S32_MIN;
@@ -1238,7 +1238,7 @@ static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane *update,
vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
+ suffix->maximumOffset = vfbbo->buffer->base.base.size;
vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
bb->y1, bb->y2);
@@ -1713,7 +1713,7 @@ static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = {
static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
{
struct vmw_screen_target_display_unit *stdu;
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_plane *primary, *cursor;
@@ -1861,7 +1861,7 @@ static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu)
*/
int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int i, ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
index 193192456663..1dd042a20a66 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -99,7 +99,7 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
if (!list_empty(&so->cotable_head) || !so->committed )
return 0;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id);
if (!cmd)
return -ENOMEM;
@@ -109,7 +109,7 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
cmd->body.mobid = res->backup->base.mem.start;
cmd->body.offsetInBytes = res->backup_offset;
cmd->body.sizeInBytes = so->size;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_cotable_add_resource(so->cotable, &so->cotable_head);
@@ -172,7 +172,7 @@ static int vmw_dx_streamoutput_scrub(struct vmw_resource *res)
WARN_ON_ONCE(!so->committed);
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id);
if (!cmd)
return -ENOMEM;
@@ -182,7 +182,7 @@ static int vmw_dx_streamoutput_scrub(struct vmw_resource *res)
cmd->body.mobid = SVGA3D_INVALID_ID;
cmd->body.offsetInBytes = 0;
cmd->body.sizeInBytes = so->size;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
res->id = -1;
list_del_init(&so->cotable_head);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3914bfee0533..f6cab77075a0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -372,12 +372,12 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
if (res->id != -1) {
- cmd = VMW_FIFO_RESERVE(dev_priv, vmw_surface_destroy_size());
+ cmd = VMW_CMD_RESERVE(dev_priv, vmw_surface_destroy_size());
if (unlikely(!cmd))
return;
vmw_surface_destroy_encode(res->id, cmd);
- vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+ vmw_cmd_commit(dev_priv, vmw_surface_destroy_size());
/*
* used_memory_size_atomic, or separate lock
@@ -440,14 +440,14 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
*/
submit_size = vmw_surface_define_size(srf);
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd)) {
ret = -ENOMEM;
goto out_no_fifo;
}
vmw_surface_define_encode(srf, cmd);
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
vmw_fifo_resource_inc(dev_priv);
/*
@@ -492,14 +492,14 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
BUG_ON(!val_buf->bo);
submit_size = vmw_surface_dma_size(srf);
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd))
return -ENOMEM;
vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
vmw_surface_dma_encode(srf, cmd, &ptr, bind);
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
/*
* Create a fence object and fence the backup buffer.
@@ -578,12 +578,12 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
*/
submit_size = vmw_surface_destroy_size();
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd))
return -ENOMEM;
vmw_surface_destroy_encode(res->id, cmd);
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
/*
* Surface memory usage accounting.
@@ -1121,7 +1121,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
submit_len = sizeof(*cmd);
}
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
cmd3 = (typeof(cmd3))cmd;
cmd4 = (typeof(cmd4))cmd;
@@ -1188,7 +1188,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd->body.size.depth = metadata->base_size.depth;
}
- vmw_fifo_commit(dev_priv, submit_len);
+ vmw_cmd_commit(dev_priv, submit_len);
return 0;
@@ -1219,7 +1219,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
- cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd1))
return -ENOMEM;
@@ -1233,7 +1233,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
cmd2->header.size = sizeof(cmd2->body);
cmd2->body.sid = res->id;
}
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
if (res->backup->dirty && res->backup_dirty) {
/* We've just made a full upload. Clear dirty regions. */
@@ -1272,7 +1272,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd))
return -ENOMEM;
@@ -1295,7 +1295,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
cmd3->body.sid = res->id;
cmd3->body.mobid = SVGA3D_INVALID_ID;
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
/*
* Create a fence object and fence the backup buffer.
@@ -1328,7 +1328,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
vmw_binding_res_list_scrub(&res->binding_head);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(!cmd)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
@@ -1337,7 +1337,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
cmd->header.size = sizeof(cmd->body);
cmd->body.sid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
mutex_unlock(&dev_priv->binding_mutex);
vmw_resource_release_id(res);
vmw_fifo_resource_dec(dev_priv);
@@ -1550,8 +1550,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
&res->backup,
&user_srf->backup_base);
if (ret == 0) {
- if (res->backup->base.num_pages * PAGE_SIZE <
- res->backup_size) {
+ if (res->backup->base.base.size < res->backup_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n");
vmw_bo_unreference(&res->backup);
ret = -EINVAL;
@@ -1614,7 +1613,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
if (res->backup) {
rep->buffer_map_handle =
drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
- rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+ rep->buffer_size = res->backup->base.base.size;
rep->buffer_handle = backup_handle;
} else {
rep->buffer_map_handle = 0;
@@ -1692,7 +1691,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
rep->crep.buffer_handle = backup_handle;
rep->crep.buffer_map_handle =
drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
- rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+ rep->crep.buffer_size = srf->res.backup->base.base.size;
rep->creq.version = drm_vmw_gb_surface_v1;
rep->creq.svga3d_flags_upper_32_bits =
@@ -1896,7 +1895,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res)
goto out;
alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2));
- cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
if (!cmd)
return -ENOMEM;
@@ -1932,7 +1931,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res)
}
}
- vmw_fifo_commit(dev_priv, alloc_size);
+ vmw_cmd_commit(dev_priv, alloc_size);
out:
memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
dirty->num_subres);
@@ -2032,14 +2031,14 @@ static int vmw_surface_clean(struct vmw_resource *res)
} *cmd;
alloc_size = sizeof(*cmd);
- cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
if (!cmd)
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
cmd->header.size = sizeof(cmd->body);
cmd->body.sid = res->id;
- vmw_fifo_commit(dev_priv, alloc_size);
+ vmw_cmd_commit(dev_priv, alloc_size);
return 0;
}
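
A side note on the conversions running through the vmwgfx hunks above: the (u64)num_pages * PAGE_SIZE arithmetic is replaced by the byte size stored in the embedded GEM object, and the FIFO reserve/commit helpers are renamed to the vmw_cmd_*/VMW_CMD_* family. A minimal sketch of the size pattern, using a hypothetical helper that is not part of this patch:

	/* vmw_buffer_object embeds a ttm_buffer_object, which embeds a
	 * drm_gem_object whose ->size already holds the size in bytes. */
	static inline u64 vmw_bo_size(struct vmw_buffer_object *vbo)
	{
		/* was: (u64)vbo->base.num_pages * PAGE_SIZE */
		return (u64)vbo->base.base.size;
	}
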
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index 155ca3a5c7e5..e8e79de255cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -5,7 +5,6 @@
* Copyright (C) 2007-2019 VMware, Inc. All rights reserved.
*/
#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 6a04261ce760..dbb068830d80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -309,7 +309,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
*/
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
- struct device *dev = vmw_tt->dev_priv->dev->dev;
+ struct device *dev = vmw_tt->dev_priv->drm.dev;
dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
@@ -330,7 +330,7 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
*/
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
- struct device *dev = vmw_tt->dev_priv->dev->dev;
+ struct device *dev = vmw_tt->dev_priv->drm.dev;
return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}
@@ -385,7 +385,7 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
vsgt->num_pages, 0,
(unsigned long) vsgt->num_pages << PAGE_SHIFT,
- dma_get_max_seg_size(dev_priv->dev->dev),
+ dma_get_max_seg_size(dev_priv->drm.dev),
NULL, 0, GFP_KERNEL);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
@@ -611,8 +611,8 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
- ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags,
- ttm_cached);
+ ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
+ ttm_cached);
else
ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
ttm_cached);
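
The dev_priv->dev->dev to dev_priv->drm.dev changes in this file (and the &dev_priv->drm uses earlier) imply that vmw_private now embeds its drm_device rather than holding a pointer to it. Sketched under that assumption, with a hypothetical accessor:

	struct vmw_private {
		struct drm_device drm;		/* was: struct drm_device *dev */
		/* ... */
	};

	static inline struct device *vmw_parent_dev(struct vmw_private *priv)
	{
		return priv->drm.dev;		/* was: priv->dev->dev */
	}
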
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 74db5a840bed..b293c67230ef 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -220,8 +220,8 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
xen_obj->sgt_imported = sgt;
- ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
- NULL, xen_obj->num_pages);
+ ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
+ xen_obj->num_pages);
if (ret < 0)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index c8f7b21fa09e..78d787afe594 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -438,15 +438,10 @@ static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
.atomic_disable = zx_plane_atomic_disable,
};
-static void zx_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
-}
-
static const struct drm_plane_funcs zx_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = zx_plane_destroy,
+ .destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index b4a31d506fcc..e617f60afeea 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -310,10 +310,6 @@ static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
/* unused */
} , {
/* unused */
- } , {
- /* unused */
- } , {
- /* unused */
},
};
/* can't use #7 and #8 for line active and pixel active counters */
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 7bdda1b5b221..09fa75a2b289 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -899,6 +899,7 @@ config HID_SONY
depends on NEW_LEDS
depends on LEDS_CLASS
select POWER_SUPPLY
+ select CRC32
help
Support for
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 3d1ccac5d99a..2ab38b715347 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -154,7 +154,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
for (i = 0; i < cl_data->num_hid_devices; i++) {
cl_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
- &cl_data->sensor_phys_addr[i],
+ &cl_data->sensor_dma_addr[i],
GFP_KERNEL);
cl_data->sensor_sts[i] = 0;
cl_data->sensor_requested_cnt[i] = 0;
@@ -187,7 +187,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
}
info.period = msecs_to_jiffies(AMD_SFH_IDLE_LOOP);
info.sensor_idx = cl_idx;
- info.phys_address = cl_data->sensor_phys_addr[i];
+ info.dma_address = cl_data->sensor_dma_addr[i];
cl_data->report_descr[i] = kzalloc(cl_data->report_descr_sz[i], GFP_KERNEL);
if (!cl_data->report_descr[i]) {
@@ -212,7 +212,7 @@ cleanup:
if (cl_data->sensor_virt_addr[i]) {
dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
cl_data->sensor_virt_addr[i],
- cl_data->sensor_phys_addr[i]);
+ cl_data->sensor_dma_addr[i]);
}
kfree(cl_data->feature_report[i]);
kfree(cl_data->input_report[i]);
@@ -238,7 +238,7 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
if (cl_data->sensor_virt_addr[i]) {
dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
cl_data->sensor_virt_addr[i],
- cl_data->sensor_phys_addr[i]);
+ cl_data->sensor_dma_addr[i]);
}
}
kfree(cl_data);
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
index 6be0783d885c..d7eac1728e31 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
@@ -27,7 +27,7 @@ struct amdtp_cl_data {
int hid_descr_size[MAX_HID_DEVICES];
phys_addr_t phys_addr_base;
u32 *sensor_virt_addr[MAX_HID_DEVICES];
- phys_addr_t sensor_phys_addr[MAX_HID_DEVICES];
+ dma_addr_t sensor_dma_addr[MAX_HID_DEVICES];
u32 sensor_sts[MAX_HID_DEVICES];
u32 sensor_requested_cnt[MAX_HID_DEVICES];
u8 report_type[MAX_HID_DEVICES];
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index a51c7b76283b..dbac16641662 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -41,7 +41,7 @@ void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info i
cmd_param.s.buf_layout = 1;
cmd_param.s.buf_length = 16;
- writeq(info.phys_address, privdata->mmio + AMD_C2P_MSG2);
+ writeq(info.dma_address, privdata->mmio + AMD_C2P_MSG2);
writel(cmd_param.ul, privdata->mmio + AMD_C2P_MSG1);
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index e8be94f935b7..8f8d19b2cfe5 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -67,7 +67,7 @@ struct amd_mp2_dev {
struct amd_mp2_sensor_info {
u8 sensor_idx;
u32 period;
- phys_addr_t phys_address;
+ dma_addr_t dma_address;
};
void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info);
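
The amd-sfh changes above are a type correction rather than a behaviour change: dma_alloc_coherent() returns a device (DMA) address through a dma_addr_t, which is not in general a CPU physical address, so storing it in a phys_addr_t was misleading. A minimal, driver-independent sketch of the intended usage:

	#include <linux/dma-mapping.h>

	static int example_alloc(struct device *dev)
	{
		dma_addr_t dma_handle;	/* device-visible address */
		void *cpu_addr;		/* kernel virtual address */

		cpu_addr = dma_alloc_coherent(dev, 8 * sizeof(int),
					      &dma_handle, GFP_KERNEL);
		if (!cpu_addr)
			return -ENOMEM;

		/* program dma_handle (not a phys_addr_t) into the device */

		dma_free_coherent(dev, 8 * sizeof(int), cpu_addr, dma_handle);
		return 0;
	}
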
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 4c5f23640f9c..5ba0aa1d2335 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -389,6 +389,7 @@
#define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
#define USB_DEVICE_ID_HP_X2 0x074d
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
+#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index dc7f6b4a775c..f23027d2795b 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -322,6 +322,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 1ffcfc9a1e03..45e7e0bdd382 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1869,6 +1869,10 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
0xc531),
.driver_data = recvr_type_gaming_hidpp},
+ { /* Logitech G602 receiver (0xc537) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 0xc537),
+ .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech lightspeed receiver (0xc539) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1),
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index f85781464807..7eb9a6ddb46a 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -4053,6 +4053,8 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* MX Master mouse over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
.driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* MX Ergo trackball over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
.driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ /* MX Master 3 mouse over Bluetooth */
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index d670bcd57bde..0743ef51d3b2 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -2054,6 +2054,10 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_SYNAPTICS, 0xce08) },
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xce09) },
+
/* TopSeed panels */
{ .driver_data = MT_CLS_TOPSEED,
MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2,
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index d26d8cd98efc..56406cee401f 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -90,7 +90,7 @@ static int uclogic_params_get_str_desc(__u8 **pbuf, struct hid_device *hdev,
goto cleanup;
} else if (rc < 0) {
hid_err(hdev,
- "failed retrieving string descriptor #%hhu: %d\n",
+ "failed retrieving string descriptor #%u: %d\n",
idx, rc);
goto cleanup;
}
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index 41012681cafd..4399d6c6afef 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -1482,7 +1482,7 @@ static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
wdata->state.cmd_err = err;
wiimote_cmd_complete(wdata);
} else if (err) {
- hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
+ hid_warn(wdata->hdev, "Remote error %u on req %u\n", err,
cmd);
}
}
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 045c464228d9..e8acd235db2a 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1270,6 +1270,37 @@ static int wacom_devm_sysfs_create_group(struct wacom *wacom,
group);
}
+static void wacom_devm_kfifo_release(struct device *dev, void *res)
+{
+ struct kfifo_rec_ptr_2 *devres = res;
+
+ kfifo_free(devres);
+}
+
+static int wacom_devm_kfifo_alloc(struct wacom *wacom)
+{
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo;
+ int error;
+
+ pen_fifo = devres_alloc(wacom_devm_kfifo_release,
+ sizeof(struct kfifo_rec_ptr_2),
+ GFP_KERNEL);
+
+ if (!pen_fifo)
+ return -ENOMEM;
+
+ error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ if (error) {
+ devres_free(pen_fifo);
+ return error;
+ }
+
+ devres_add(&wacom->hdev->dev, pen_fifo);
+
+ return 0;
+}
+
enum led_brightness wacom_leds_brightness_get(struct wacom_led *led)
{
struct wacom *wacom = led->wacom;
@@ -2724,7 +2755,7 @@ static int wacom_probe(struct hid_device *hdev,
if (features->check_for_hid_type && features->hid_type != hdev->type)
return -ENODEV;
- error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ error = wacom_devm_kfifo_alloc(wacom);
if (error)
return error;
@@ -2786,8 +2817,6 @@ static void wacom_remove(struct hid_device *hdev)
if (wacom->wacom_wac.features.type != REMOTE)
wacom_release_resources(wacom);
-
- kfifo_free(&wacom_wac->pen_fifo);
}
#ifdef CONFIG_PM
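
wacom_devm_kfifo_alloc() above follows the standard devres idiom: allocate a release record with devres_alloc(), initialise the resource, then register it with devres_add() so it is torn down automatically when the device is unbound (which is why the explicit kfifo_free() in wacom_remove() can go). The same idiom for an arbitrary resource, with hypothetical my_resource_* helpers standing in for kfifo_alloc()/kfifo_free():

	static void my_res_release(struct device *dev, void *res)
	{
		my_resource_free(res);		/* mirrors kfifo_free() */
	}

	static int my_devm_res_alloc(struct device *dev)
	{
		struct my_resource *r;
		int error;

		r = devres_alloc(my_res_release, sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		error = my_resource_init(r);	/* mirrors kfifo_alloc() */
		if (error) {
			devres_free(r);		/* not yet registered */
			return error;
		}

		devres_add(dev, r);		/* freed on unbind */
		return 0;
	}
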
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 502f8cd95f6d..d491fdcee61f 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2550,7 +2550,6 @@ static void hv_kexec_handler(void)
/* Make sure conn_state is set as hv_synic_cleanup checks for it */
mb();
cpuhp_remove_state(hyperv_cpuhp_online);
- hyperv_cleanup();
};
static void hv_crash_handler(struct pt_regs *regs)
@@ -2566,7 +2565,6 @@ static void hv_crash_handler(struct pt_regs *regs)
cpu = smp_processor_id();
hv_stimer_cleanup(cpu);
hv_synic_disable_regs(cpu);
- hyperv_cleanup();
};
static int hv_synic_suspend(void)
diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c
index 9b306448b7a0..822c2e74b98d 100644
--- a/drivers/hwmon/amd_energy.c
+++ b/drivers/hwmon/amd_energy.c
@@ -222,7 +222,7 @@ static int amd_create_sensor(struct device *dev,
*/
cpus = num_present_cpus() / num_siblings;
- s_config = devm_kcalloc(dev, cpus + sockets,
+ s_config = devm_kcalloc(dev, cpus + sockets + 1,
sizeof(u32), GFP_KERNEL);
if (!s_config)
return -ENOMEM;
@@ -254,6 +254,7 @@ static int amd_create_sensor(struct device *dev,
scnprintf(label_l[i], 10, "Esocket%u", (i - cpus));
}
+ s_config[i] = 0;
return 0;
}
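
The extra element in the devm_kcalloc() call and the trailing s_config[i] = 0 above restore the hwmon convention that a channel config array is zero-terminated; the core iterates the array until it hits a 0 entry. Illustrative only (values made up, not from this driver):

	/* a hwmon config array must end in a 0 terminator */
	static const u32 example_energy_config[] = {
		HWMON_E_INPUT | HWMON_E_LABEL,
		HWMON_E_INPUT | HWMON_E_LABEL,
		0,	/* the "+ 1" above reserves room for this */
	};
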
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 777439f48c14..111a91dc6b79 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -334,8 +334,18 @@ static int pwm_fan_probe(struct platform_device *pdev)
ctx->pwm_value = MAX_PWM;
- /* Set duty cycle to maximum allowed and enable PWM output */
pwm_init_state(ctx->pwm, &state);
+ /*
+ * __set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned
+ * long. Check this here to prevent the fan from running at too low a
+ * frequency.
+ */
+ if (state.period > ULONG_MAX / MAX_PWM + 1) {
+ dev_err(dev, "Configured period too big\n");
+ return -EINVAL;
+ }
+
+ /* Set duty cycle to maximum allowed and enable PWM output */
state.duty_cycle = ctx->pwm->args.period - 1;
state.enabled = true;
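
To see why the new bound is sufficient, take MAX_PWM = 255 as defined in this driver: if period <= ULONG_MAX / 255 + 1, then period - 1 <= ULONG_MAX / 255, so 255 * (period - 1) <= ULONG_MAX and the intermediate product in __set_pwm cannot overflow. On a 32-bit unsigned long this caps the accepted period at 16,843,010 ns (roughly a 59 Hz PWM signal); on 64-bit targets the limit is far beyond any realistic period, so the check effectively never fires there.
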
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 52acd77438ed..251e75c9ba9d 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -269,6 +269,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Alder Lake-P */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Alder Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
.driver_data = (kernel_ulong_t)&intel_th_2x,
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
index 3e7df1c0477f..81d7b21d31ec 100644
--- a/drivers/hwtracing/stm/heartbeat.c
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -64,7 +64,7 @@ static void stm_heartbeat_unlink(struct stm_source_data *data)
static int stm_heartbeat_init(void)
{
- int i, ret = -ENOMEM;
+ int i, ret;
if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
return -EINVAL;
@@ -72,8 +72,10 @@ static int stm_heartbeat_init(void)
for (i = 0; i < nr_devs; i++) {
stm_heartbeat[i].data.name =
kasprintf(GFP_KERNEL, "heartbeat.%d", i);
- if (!stm_heartbeat[i].data.name)
+ if (!stm_heartbeat[i].data.name) {
+ ret = -ENOMEM;
goto fail_unregister;
+ }
stm_heartbeat[i].data.nr_chans = 1;
stm_heartbeat[i].data.link = stm_heartbeat_link;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index d4d60ad0eda0..ab1f39ac39f4 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -1013,6 +1013,7 @@ config I2C_SIRF
config I2C_SPRD
tristate "Spreadtrum I2C interface"
depends on I2C=y && (ARCH_SPRD || COMPILE_TEST)
+ depends on COMMON_CLK
help
If you say yes to this option, support will be included for the
Spreadtrum I2C interface.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index ae90713443fa..877fe3733a42 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1449,7 +1449,7 @@ static int i801_add_mux(struct i801_priv *priv)
/* Register GPIO descriptor lookup table */
lookup = devm_kzalloc(dev,
- struct_size(lookup, table, mux_config->n_gpios),
+ struct_size(lookup, table, mux_config->n_gpios + 1),
GFP_KERNEL);
if (!lookup)
return -ENOMEM;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index b444fbf1a262..a8e8af57e33f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -241,6 +241,19 @@ static struct imx_i2c_hwdata vf610_i2c_hwdata = {
};
+static const struct platform_device_id imx_i2c_devtype[] = {
+ {
+ .name = "imx1-i2c",
+ .driver_data = (kernel_ulong_t)&imx1_i2c_hwdata,
+ }, {
+ .name = "imx21-i2c",
+ .driver_data = (kernel_ulong_t)&imx21_i2c_hwdata,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
+
static const struct of_device_id i2c_imx_dt_ids[] = {
{ .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
{ .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
@@ -1330,7 +1343,11 @@ static int i2c_imx_probe(struct platform_device *pdev)
return -ENOMEM;
match = device_get_match_data(&pdev->dev);
- i2c_imx->hwdata = match;
+ if (match)
+ i2c_imx->hwdata = match;
+ else
+ i2c_imx->hwdata = (struct imx_i2c_hwdata *)
+ platform_get_device_id(pdev)->driver_data;
/* Setup i2c_imx driver structure */
strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
@@ -1498,6 +1515,7 @@ static struct platform_driver i2c_imx_driver = {
.of_match_table = i2c_imx_dt_ids,
.acpi_match_table = i2c_imx_acpi_ids,
},
+ .id_table = imx_i2c_devtype,
};
static int __init i2c_adap_imx_init(void)
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 33de99b7bc20..0818d3e50734 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -38,6 +38,7 @@
#define I2C_IO_CONFIG_OPEN_DRAIN 0x0003
#define I2C_IO_CONFIG_PUSH_PULL 0x0000
#define I2C_SOFT_RST 0x0001
+#define I2C_HANDSHAKE_RST 0x0020
#define I2C_FIFO_ADDR_CLR 0x0001
#define I2C_DELAY_LEN 0x0002
#define I2C_TIME_CLR_VALUE 0x0000
@@ -45,6 +46,7 @@
#define I2C_WRRD_TRANAC_VALUE 0x0002
#define I2C_RD_TRANAC_VALUE 0x0001
#define I2C_SCL_MIS_COMP_VALUE 0x0000
+#define I2C_CHN_CLR_FLAG 0x0000
#define I2C_DMA_CON_TX 0x0000
#define I2C_DMA_CON_RX 0x0001
@@ -54,7 +56,9 @@
#define I2C_DMA_START_EN 0x0001
#define I2C_DMA_INT_FLAG_NONE 0x0000
#define I2C_DMA_CLR_FLAG 0x0000
+#define I2C_DMA_WARM_RST 0x0001
#define I2C_DMA_HARD_RST 0x0002
+#define I2C_DMA_HANDSHAKE_RST 0x0004
#define MAX_SAMPLE_CNT_DIV 8
#define MAX_STEP_CNT_DIV 64
@@ -475,11 +479,24 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
- writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
- udelay(50);
- writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
-
- mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+ if (i2c->dev_comp->dma_sync) {
+ writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
+ udelay(10);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ udelay(10);
+ writel(I2C_DMA_HANDSHAKE_RST | I2C_DMA_HARD_RST,
+ i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_HANDSHAKE_RST | I2C_SOFT_RST,
+ OFFSET_SOFTRESET);
+ udelay(10);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_SOFTRESET);
+ } else {
+ writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+ udelay(50);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+ }
/* Set ioconfig */
if (i2c->use_push_pull)
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index d9607905dc2f..845eda70b8ca 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -347,7 +347,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
if (result)
return result;
if (recv_len && i == 0) {
- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
+ if (data[i] > I2C_SMBUS_BLOCK_MAX)
return -EPROTO;
length += data[i];
}
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 19cda6742423..2917fecf6c80 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -72,6 +72,8 @@
/* timeout (ms) for pm runtime autosuspend */
#define SPRD_I2C_PM_TIMEOUT 1000
+/* timeout (ms) for transfer message */
+#define I2C_XFER_TIMEOUT 1000
/* SPRD i2c data structure */
struct sprd_i2c {
@@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, bool is_last_msg)
{
struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
+ unsigned long time_left;
i2c_dev->msg = msg;
i2c_dev->buf = msg->buf;
@@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
sprd_i2c_opt_start(i2c_dev);
- wait_for_completion(&i2c_dev->complete);
+ time_left = wait_for_completion_timeout(&i2c_dev->complete,
+ msecs_to_jiffies(I2C_XFER_TIMEOUT));
+ if (!time_left)
+ return -ETIMEDOUT;
return i2c_dev->err;
}
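
The sprd change above swaps an unbounded wait_for_completion() for the timeout variant, which returns 0 on timeout and the remaining jiffies otherwise. The pattern in isolation, with an illustrative timeout value:

	#include <linux/completion.h>
	#include <linux/jiffies.h>

	#define XFER_TIMEOUT_MS	1000	/* illustrative value */

	static int wait_for_xfer(struct completion *done, int hw_err)
	{
		unsigned long time_left;

		time_left = wait_for_completion_timeout(done,
					msecs_to_jiffies(XFER_TIMEOUT_MS));
		if (!time_left)
			return -ETIMEDOUT;	/* completion never fired */

		return hw_err;	/* status recorded by the IRQ handler */
	}
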
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index ec7a7e917edd..c0c7d01473f2 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -80,7 +80,7 @@ static int tegra_bpmp_xlate_flags(u16 flags, u16 *out)
flags &= ~I2C_M_RECV_LEN;
}
- return (flags != 0) ? -EINVAL : 0;
+ return 0;
}
/**
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 6f08c0c3238d..8b113ae32dc7 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -326,6 +326,8 @@ static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned int reg)
/* read back register to make sure that register writes completed */
if (reg != I2C_TX_FIFO)
readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+ else if (i2c_dev->is_vi)
+ readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, I2C_INT_STATUS));
}
static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
@@ -339,6 +341,21 @@ static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
}
+static void i2c_writesl_vi(struct tegra_i2c_dev *i2c_dev, void *data,
+ unsigned int reg, unsigned int len)
+{
+ u32 *data32 = data;
+
+ /*
+ * The VI I2C controller has a known hardware bug where writes get stuck
+ * when multiple writes to the TX_FIFO register happen back to back.
+ * The recommended software workaround is to read an I2C register after
+ * each write to the TX_FIFO register to flush out the data.
+ */
+ while (len--)
+ i2c_writel(i2c_dev, *data32++, reg);
+}
+
static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
unsigned int reg, unsigned int len)
{
@@ -533,7 +550,7 @@ static int tegra_i2c_poll_register(struct tegra_i2c_dev *i2c_dev,
void __iomem *addr = i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg);
u32 val;
- if (!i2c_dev->atomic_mode)
+ if (!i2c_dev->atomic_mode && !in_irq())
return readl_relaxed_poll_timeout(addr, val, !(val & mask),
delay_us, timeout_us);
@@ -811,7 +828,10 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
i2c_dev->msg_buf_remaining = buf_remaining;
i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD;
- i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+ if (i2c_dev->is_vi)
+ i2c_writesl_vi(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+ else
+ i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
buf += words_to_transfer * BYTES_PER_FIFO_WORD;
}
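
The new i2c_writesl_vi() helper routes every word through i2c_writel(), which (per the first hunk in this file) now reads I2C_INT_STATUS back after each TX_FIFO write; reading any register on the same device is the usual way to flush a posted MMIO write out to the hardware. A generic sketch of that flush pattern, with hypothetical register offsets:

	static void write_then_flush(void __iomem *base, u32 val,
				     unsigned int reg, unsigned int flush_reg)
	{
		writel_relaxed(val, base + reg);
		/* read back a register on the same device to flush the write */
		readl_relaxed(base + flush_reg);
	}
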
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 2162bc80f09e..013ad33fbbc8 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -223,7 +223,6 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
sense_rq->rq_disk = rq->rq_disk;
sense_rq->cmd_flags = REQ_OP_DRV_IN;
ide_req(sense_rq)->type = ATA_PRIV_SENSE;
- sense_rq->rq_flags |= RQF_PREEMPT;
req->cmd[0] = GPCMD_REQUEST_SENSE;
req->cmd[4] = cmd_len;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 1a53c7a75224..4867b67b60d6 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -515,15 +515,10 @@ repeat:
* above to return us whatever is in the queue. Since we call
* ide_do_request() ourselves, we end up taking requests while
* the queue is blocked...
- *
- * We let requests forced at head of queue with ide-preempt
- * though. I hope that doesn't happen too much, hopefully not
- * unless the subdriver triggers such a thing in its own PM
- * state machine.
*/
if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
ata_pm_request(rq) == 0 &&
- (rq->rq_flags & RQF_PREEMPT) == 0) {
+ (rq->rq_flags & RQF_PM) == 0) {
/* there should be no pending command at this point */
ide_unlock_port(hwif);
goto plug_device;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 192e6c65d34e..82ab308f1aaf 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -77,7 +77,7 @@ int generic_ide_resume(struct device *dev)
}
memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
ide_req(rq)->type = ATA_PRIV_PM_RESUME;
ide_req(rq)->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index d79335506ecd..28f93b9aa51b 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -963,6 +963,39 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.enter = NULL }
};
+/*
+ * Note, depending on HW and FW revision, SnowRidge SoC may or may not support
+ * C6, and this is indicated in the CPUID mwait leaf.
+ */
+static struct cpuidle_state snr_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 15,
+ .target_residency = 25,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 130,
+ .target_residency = 500,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
static const struct idle_cpu idle_cpu_nehalem __initconst = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
@@ -1084,6 +1117,12 @@ static const struct idle_cpu idle_cpu_dnv __initconst = {
.use_acpi = true,
};
+static const struct idle_cpu idle_cpu_snr __initconst = {
+ .state_table = snr_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem),
@@ -1122,7 +1161,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_dnv),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr),
{}
};
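
The comment above the new snr_cstates table notes that C6 availability on SnowRidge is advertised through the CPUID MWAIT leaf. For reference, intel_idle decides whether to expose such a state roughly as follows; this is my paraphrase of the existing driver logic, not code added by this patch:

	unsigned int eax, ebx, ecx, mwait_substates;
	int cstate, num_substates;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);

	/* 0x20 is the MWAIT hint of the C6 entry above */
	cstate = MWAIT_HINT2CSTATE(0x20);
	num_substates = (mwait_substates >> ((cstate + 1) * MWAIT_SUBSTATE_SIZE))
			& MWAIT_SUBSTATE_MASK;
	if (!num_substates) {
		/* the SoC/firmware does not expose this C-state: skip it */
	}
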
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index b11c8c47ba2a..e946903b0993 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -397,16 +397,12 @@ static int tiadc_iio_buffered_hardware_setup(struct device *dev,
ret = devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
flags, indio_dev->name, indio_dev);
if (ret)
- goto error_kfifo_free;
+ return ret;
indio_dev->setup_ops = setup_ops;
indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
return 0;
-
-error_kfifo_free:
- iio_kfifo_free(indio_dev->buffer);
- return ret;
}
static const char * const chan_name_ain[] = {
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 0507283bd4c1..2dbd2646e44e 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -23,35 +23,31 @@
* @sdata: Sensor data.
*
* returns:
- * 0 - no new samples available
- * 1 - new samples available
- * negative - error or unknown
+ * false - no new samples available or read error
+ * true - new samples available
*/
-static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
- struct st_sensor_data *sdata)
+static bool st_sensors_new_samples_available(struct iio_dev *indio_dev,
+ struct st_sensor_data *sdata)
{
int ret, status;
/* How would I know if I can't check it? */
if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr)
- return -EINVAL;
+ return true;
/* No scan mask, no interrupt */
if (!indio_dev->active_scan_mask)
- return 0;
+ return false;
ret = regmap_read(sdata->regmap,
sdata->sensor_settings->drdy_irq.stat_drdy.addr,
&status);
if (ret < 0) {
dev_err(sdata->dev, "error checking samples available\n");
- return ret;
+ return false;
}
- if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask)
- return 1;
-
- return 0;
+ return !!(status & sdata->sensor_settings->drdy_irq.stat_drdy.mask);
}
/**
@@ -180,9 +176,15 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
/* Tell the interrupt handler that we're dealing with edges */
if (irq_trig == IRQF_TRIGGER_FALLING ||
- irq_trig == IRQF_TRIGGER_RISING)
+ irq_trig == IRQF_TRIGGER_RISING) {
+ if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
+ dev_err(&indio_dev->dev,
+ "edge IRQ not supported w/o stat register.\n");
+ err = -EOPNOTSUPP;
+ goto iio_trigger_free;
+ }
sdata->edge_irq = true;
- else
+ } else {
/*
* If we're not using edges (i.e. level interrupts) we
* just mask off the IRQ, handle one interrupt, then
@@ -190,6 +192,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
* interrupt handler top half again and start over.
*/
irq_trig |= IRQF_ONESHOT;
+ }
/*
* If the interrupt pin is Open Drain, by definition this
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 28921b62e642..e9297c25d4ef 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -187,9 +187,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
return ret;
if (pwr_down)
- st->pwr_down_mask |= (1 << chan->channel);
- else
st->pwr_down_mask &= ~(1 << chan->channel);
+ else
+ st->pwr_down_mask |= (1 << chan->channel);
ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
index a2f820997afc..37fd0b65a014 100644
--- a/drivers/iio/proximity/sx9310.c
+++ b/drivers/iio/proximity/sx9310.c
@@ -601,7 +601,7 @@ static int sx9310_read_thresh(struct sx9310_data *data,
return ret;
regval = FIELD_GET(SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval);
- if (regval > ARRAY_SIZE(sx9310_pthresh_codes))
+ if (regval >= ARRAY_SIZE(sx9310_pthresh_codes))
return -EINVAL;
*val = sx9310_pthresh_codes[regval];
@@ -1305,7 +1305,8 @@ sx9310_get_default_reg(struct sx9310_data *data, int i,
if (ret)
break;
- pos = min(max(ilog2(pos), 3), 10) - 3;
+ /* Powers of 2, except for a gap between 16 and 64 */
+ pos = clamp(ilog2(pos), 3, 11) - (pos >= 32 ? 4 : 3);
reg_def->def &= ~SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK;
reg_def->def |= FIELD_PREP(SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK,
pos);
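
By my reading of the new expression, clamp(ilog2(pos), 3, 11) minus 4 when pos >= 32 (and minus 3 otherwise) maps a configured average positive filter value of 8 to register code 0, 16 to 1, 64 to 2, 128 to 3, 256 to 4, 512 to 5, 1024 to 6 and 2048 or more to 7 - i.e. the powers of two the hardware accepts, skipping 32, which is exactly the "gap between 16 and 64" the comment mentions. The old min()/max() form could not express that gap.
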
diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
index 503fe54a0bb9..608ccb1d8bc8 100644
--- a/drivers/iio/temperature/mlx90632.c
+++ b/drivers/iio/temperature/mlx90632.c
@@ -248,6 +248,12 @@ static int mlx90632_set_meas_type(struct regmap *regmap, u8 type)
if (ret < 0)
return ret;
+ /*
+ * Give the mlx90632 some time to reset properly before sending a new I2C command;
+ * if this is not done, the following I2C command(s) will not be accepted.
+ */
+ usleep_range(150, 200);
+
ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL,
(MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK),
(MLX90632_MTYP_STATUS(type) | MLX90632_PWR_STATUS_HALT));
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 7f70e5a7de10..97a77ea8d3c9 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -131,8 +131,10 @@ static ssize_t default_roce_mode_store(struct config_item *item,
return ret;
gid_type = ib_cache_gid_parse_type_str(buf);
- if (gid_type < 0)
+ if (gid_type < 0) {
+ cma_configfs_params_put(cma_dev);
return -EINVAL;
+ }
ret = cma_set_default_gid_type(cma_dev, group->port_num, gid_type);
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index e0a41c867002..ff1551b3cf61 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -254,6 +254,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
} else {
ret = xa_alloc_cyclic(&rt->xa, &res->id, res, xa_limit_32b,
&rt->next_id, GFP_KERNEL);
+ ret = (ret < 0) ? ret : 0;
}
out:
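
The added normalisation accounts for xa_alloc_cyclic() returning 1 (rather than 0) when the cyclic ID counter wraps around; a wrap is a success, so only negative values should propagate as errors. In isolation:

	ret = xa_alloc_cyclic(&rt->xa, &res->id, res, xa_limit_32b,
			      &rt->next_id, GFP_KERNEL);
	/* 1 only means the ID counter wrapped; treat it as success */
	ret = (ret < 0) ? ret : 0;
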
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 7dab9a27a145..da2512c30ffd 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -95,8 +95,6 @@ struct ucma_context {
u64 uid;
struct list_head list;
- /* sync between removal event and id destroy, protected by file mut */
- int destroying;
struct work_struct close_work;
};
@@ -122,7 +120,7 @@ static DEFINE_XARRAY_ALLOC(ctx_table);
static DEFINE_XARRAY_ALLOC(multicast_table);
static const struct file_operations ucma_fops;
-static int __destroy_id(struct ucma_context *ctx);
+static int ucma_destroy_private_ctx(struct ucma_context *ctx);
static inline struct ucma_context *_ucma_find_context(int id,
struct ucma_file *file)
@@ -179,19 +177,14 @@ static void ucma_close_id(struct work_struct *work)
/* once all inflight tasks are finished, we close all underlying
* resources. The context is still alive till its explicit destroying
- * by its creator.
+ * by its creator. This puts back the xarray's reference.
*/
ucma_put_ctx(ctx);
wait_for_completion(&ctx->comp);
/* No new events will be generated after destroying the id. */
rdma_destroy_id(ctx->cm_id);
- /*
- * At this point ctx->ref is zero so the only place the ctx can be is in
- * a uevent or in __destroy_id(). Since the former doesn't touch
- * ctx->cm_id and the latter sync cancels this, there is no races with
- * this store.
- */
+ /* Reading the cm_id without holding a positive ref is not allowed */
ctx->cm_id = NULL;
}
@@ -204,7 +197,6 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
return NULL;
INIT_WORK(&ctx->close_work, ucma_close_id);
- refcount_set(&ctx->ref, 1);
init_completion(&ctx->comp);
/* So list_del() will work if we don't do ucma_finish_ctx() */
INIT_LIST_HEAD(&ctx->list);
@@ -218,6 +210,13 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
return ctx;
}
+static void ucma_set_ctx_cm_id(struct ucma_context *ctx,
+ struct rdma_cm_id *cm_id)
+{
+ refcount_set(&ctx->ref, 1);
+ ctx->cm_id = cm_id;
+}
+
static void ucma_finish_ctx(struct ucma_context *ctx)
{
lockdep_assert_held(&ctx->file->mut);
@@ -303,7 +302,7 @@ static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
ctx = ucma_alloc_ctx(listen_ctx->file);
if (!ctx)
goto err_backlog;
- ctx->cm_id = cm_id;
+ ucma_set_ctx_cm_id(ctx, cm_id);
uevent = ucma_create_uevent(listen_ctx, event);
if (!uevent)
@@ -321,8 +320,7 @@ static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
return 0;
err_alloc:
- xa_erase(&ctx_table, ctx->id);
- kfree(ctx);
+ ucma_destroy_private_ctx(ctx);
err_backlog:
atomic_inc(&listen_ctx->backlog);
/* Returning error causes the new ID to be destroyed */
@@ -356,8 +354,12 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
wake_up_interruptible(&ctx->file->poll_wait);
}
- if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL && !ctx->destroying)
- queue_work(system_unbound_wq, &ctx->close_work);
+ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
+ xa_lock(&ctx_table);
+ if (xa_load(&ctx_table, ctx->id) == ctx)
+ queue_work(system_unbound_wq, &ctx->close_work);
+ xa_unlock(&ctx_table);
+ }
return 0;
}
@@ -461,13 +463,12 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
ret = PTR_ERR(cm_id);
goto err1;
}
- ctx->cm_id = cm_id;
+ ucma_set_ctx_cm_id(ctx, cm_id);
resp.id = ctx->id;
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp))) {
- xa_erase(&ctx_table, ctx->id);
- __destroy_id(ctx);
+ ucma_destroy_private_ctx(ctx);
return -EFAULT;
}
@@ -477,8 +478,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
return 0;
err1:
- xa_erase(&ctx_table, ctx->id);
- kfree(ctx);
+ ucma_destroy_private_ctx(ctx);
return ret;
}
@@ -516,68 +516,73 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
rdma_unlock_handler(mc->ctx->cm_id);
}
-/*
- * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
- * this point, no new events will be reported from the hardware. However, we
- * still need to cleanup the UCMA context for this ID. Specifically, there
- * might be events that have not yet been consumed by the user space software.
- * mutex. After that we release them as needed.
- */
-static int ucma_free_ctx(struct ucma_context *ctx)
+static int ucma_cleanup_ctx_events(struct ucma_context *ctx)
{
int events_reported;
struct ucma_event *uevent, *tmp;
LIST_HEAD(list);
- ucma_cleanup_multicast(ctx);
-
- /* Cleanup events not yet reported to the user. */
+ /* Cleanup events not yet reported to the user. */
mutex_lock(&ctx->file->mut);
list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
- if (uevent->ctx == ctx || uevent->conn_req_ctx == ctx)
+ if (uevent->ctx != ctx)
+ continue;
+
+ if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
+ xa_cmpxchg(&ctx_table, uevent->conn_req_ctx->id,
+ uevent->conn_req_ctx, XA_ZERO_ENTRY,
+ GFP_KERNEL) == uevent->conn_req_ctx) {
list_move_tail(&uevent->list, &list);
+ continue;
+ }
+ list_del(&uevent->list);
+ kfree(uevent);
}
list_del(&ctx->list);
events_reported = ctx->events_reported;
mutex_unlock(&ctx->file->mut);
/*
- * If this was a listening ID then any connections spawned from it
- * that have not been delivered to userspace are cleaned up too.
- * Must be done outside any locks.
+ * If this was a listening ID then any connections spawned from it that
+ * have not been delivered to userspace are cleaned up too. Must be done
+ * outside any locks.
*/
list_for_each_entry_safe(uevent, tmp, &list, list) {
- list_del(&uevent->list);
- if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
- uevent->conn_req_ctx != ctx)
- __destroy_id(uevent->conn_req_ctx);
+ ucma_destroy_private_ctx(uevent->conn_req_ctx);
kfree(uevent);
}
-
- mutex_destroy(&ctx->mutex);
- kfree(ctx);
return events_reported;
}
-static int __destroy_id(struct ucma_context *ctx)
+/*
+ * When this is called the xarray must have a XA_ZERO_ENTRY in the ctx->id (ie
+ * the ctx is not public to the user). This is either because:
+ * - ucma_finish_ctx() hasn't been called
+ * - xa_cmpxchg() succeeded in removing the entry (only one thread can succeed)
+ */
+static int ucma_destroy_private_ctx(struct ucma_context *ctx)
{
+ int events_reported;
+
/*
- * If the refcount is already 0 then ucma_close_id() has already
- * destroyed the cm_id, otherwise holding the refcount keeps cm_id
- * valid. Prevent queue_work() from being called.
+ * Destroy the underlying cm_id. New work queuing is prevented now by
+ * the removal from the xarray. Once the work is cancelled, ref will either
+ * be 0 because the work ran to completion and consumed the ref from the
+ * xarray, or it will be positive because we still have the ref from the
+ * xarray. This can also be 0 in cases where cm_id was never set.
*/
- if (refcount_inc_not_zero(&ctx->ref)) {
- rdma_lock_handler(ctx->cm_id);
- ctx->destroying = 1;
- rdma_unlock_handler(ctx->cm_id);
- ucma_put_ctx(ctx);
- }
-
cancel_work_sync(&ctx->close_work);
- /* At this point it's guaranteed that there is no inflight closing task */
- if (ctx->cm_id)
+ if (refcount_read(&ctx->ref))
ucma_close_id(&ctx->close_work);
- return ucma_free_ctx(ctx);
+
+ events_reported = ucma_cleanup_ctx_events(ctx);
+ ucma_cleanup_multicast(ctx);
+
+ WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, XA_ZERO_ENTRY, NULL,
+ GFP_KERNEL) != NULL);
+ mutex_destroy(&ctx->mutex);
+ kfree(ctx);
+ return events_reported;
}
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
@@ -596,14 +601,17 @@ static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
xa_lock(&ctx_table);
ctx = _ucma_find_context(cmd.id, file);
- if (!IS_ERR(ctx))
- __xa_erase(&ctx_table, ctx->id);
+ if (!IS_ERR(ctx)) {
+ if (__xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
+ GFP_KERNEL) != ctx)
+ ctx = ERR_PTR(-ENOENT);
+ }
xa_unlock(&ctx_table);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- resp.events_reported = __destroy_id(ctx);
+ resp.events_reported = ucma_destroy_private_ctx(ctx);
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp)))
ret = -EFAULT;
@@ -1777,15 +1785,16 @@ static int ucma_close(struct inode *inode, struct file *filp)
* prevented by this being a FD release function. The list_add_tail() in
* ucma_connect_event_handler() can run concurrently, however it only
* adds to the list *after* a listening ID. By only reading the first of
- * the list, and relying on __destroy_id() to block
+ * the list, and relying on ucma_destroy_private_ctx() to block
* ucma_connect_event_handler(), no additional locking is needed.
*/
while (!list_empty(&file->ctx_list)) {
struct ucma_context *ctx = list_first_entry(
&file->ctx_list, struct ucma_context, list);
- xa_erase(&ctx_table, ctx->id);
- __destroy_id(ctx);
+ WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
+ GFP_KERNEL) != ctx);
+ ucma_destroy_private_ctx(ctx);
}
kfree(file);
return 0;
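
The ucma rework above uses the xarray itself to decide who owns destruction: whoever successfully exchanges the context pointer for XA_ZERO_ENTRY (a reserved entry that lookups such as xa_load() see as NULL) wins, and every other path backs off. A condensed sketch of that claim-then-release pattern, mirroring the hunks above:

	/* claim: only one caller sees the old pointer and may destroy ctx */
	if (xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
		       GFP_KERNEL) != ctx)
		return -ENOENT;		/* somebody else is destroying it */

	/* ... tear the context down ... */

	/* release: drop the reserved slot once teardown is complete */
	WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, XA_ZERO_ENTRY, NULL,
			   GFP_KERNEL) != NULL);
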
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 7ca4112e3e8f..917338db7ac1 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -135,7 +135,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
*/
if (mask)
pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
- return rounddown_pow_of_two(pgsz_bitmap);
+ return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 3bae9ba0ead8..d26f3f3e0462 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3956,7 +3956,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
err = set_has_smi_cap(dev);
if (err)
- return err;
+ goto err_mp;
if (!mlx5_core_mp_enabled(mdev)) {
for (i = 1; i <= dev->num_ports; i++) {
@@ -4319,7 +4319,7 @@ static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
if (err)
- mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
return err;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index bc98bd950d99..3acb5c10b155 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -434,9 +434,9 @@ static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
__func__, dev->id, pd->id);
}
- kfree(uctx->cntxt_pd);
uctx->cntxt_pd = NULL;
_ocrdma_dealloc_pd(dev, pd);
+ kfree(pd);
}
static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 38a37770c016..3705c6b8b223 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -214,6 +214,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
}
usnic_uiom_free_dev_list(dev_list);
+ dev_list = NULL;
}
/* Try to find resources on an unused vf */
@@ -239,6 +240,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
qp_grp_check:
if (IS_ERR_OR_NULL(qp_grp)) {
usnic_err("Failed to allocate qp_grp\n");
+ if (usnic_ib_share_vf)
+ usnic_uiom_free_dev_list(dev_list);
return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
}
return qp_grp;
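
The two hunks above fix a leak and a potential double free of dev_list: the pointer is cleared after the first free, and the error path frees it only when the shared-VF search owned it. A userspace sketch of the pattern (the share_vf flag stands in for usnic_ib_share_vf):

#include <stdlib.h>
#include <stdio.h>

int main(void)
{
	int share_vf = 1;
	int *dev_list = malloc(4 * sizeof(*dev_list));
	int found = 0;

	if (share_vf && !found) {
		free(dev_list);		/* first search over shared VFs failed */
		dev_list = NULL;	/* prevent a later double free */
	}

	/* ... second search over unused VFs also fails ... */
	if (!found) {
		if (share_vf)
			free(dev_list);	/* free(NULL) is a no-op, so this is safe */
		fprintf(stderr, "Failed to allocate qp_grp\n");
		return 1;
	}
	return 0;
}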
diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
index 41dba7090c2a..c770951a909c 100644
--- a/drivers/interconnect/imx/imx.c
+++ b/drivers/interconnect/imx/imx.c
@@ -96,9 +96,10 @@ static int imx_icc_node_init_qos(struct icc_provider *provider,
return -ENODEV;
}
/* Allow scaling to be disabled on a per-node basis */
- if (!dn || !of_device_is_available(dn)) {
+ if (!of_device_is_available(dn)) {
dev_warn(dev, "Missing property %s, skip scaling %s\n",
adj->phandle_name, node->name);
+ of_node_put(dn);
return 0;
}
diff --git a/drivers/interconnect/imx/imx8mq.c b/drivers/interconnect/imx/imx8mq.c
index ba43a15aefec..d7768d3c6d8a 100644
--- a/drivers/interconnect/imx/imx8mq.c
+++ b/drivers/interconnect/imx/imx8mq.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/interconnect-provider.h>
#include <dt-bindings/interconnect/imx8mq.h>
#include "imx.h"
@@ -94,6 +95,7 @@ static struct platform_driver imx8mq_icc_driver = {
.remove = imx8mq_icc_remove,
.driver = {
.name = "imx8mq-interconnect",
+ .sync_state = icc_sync_state,
},
};
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index a8f93ba265f8..b3fb5b02bcf1 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -42,13 +42,23 @@ config INTERCONNECT_QCOM_QCS404
This is a driver for the Qualcomm Network-on-Chip on qcs404-based
platforms.
+config INTERCONNECT_QCOM_RPMH_POSSIBLE
+ tristate
+ default INTERCONNECT_QCOM
+ depends on QCOM_RPMH || (COMPILE_TEST && !QCOM_RPMH)
+ depends on QCOM_COMMAND_DB || (COMPILE_TEST && !QCOM_COMMAND_DB)
+ depends on OF || COMPILE_TEST
+ help
+ Compile-testing RPMH drivers is possible on other platforms,
+ but in order to avoid link failures, drivers must not be built-in
+ when QCOM_RPMH or QCOM_COMMAND_DB are loadable modules.
+
config INTERCONNECT_QCOM_RPMH
tristate
config INTERCONNECT_QCOM_SC7180
tristate "Qualcomm SC7180 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -57,8 +67,7 @@ config INTERCONNECT_QCOM_SC7180
config INTERCONNECT_QCOM_SDM845
tristate "Qualcomm SDM845 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -67,8 +76,7 @@ config INTERCONNECT_QCOM_SDM845
config INTERCONNECT_QCOM_SM8150
tristate "Qualcomm SM8150 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -77,8 +85,7 @@ config INTERCONNECT_QCOM_SM8150
config INTERCONNECT_QCOM_SM8250
tristate "Qualcomm SM8250 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index f54cd79b43e4..6a1f7048dacc 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1973,8 +1973,6 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
return r;
}
- iommu->int_enabled = true;
-
return 0;
}
@@ -2169,6 +2167,7 @@ static int iommu_init_irq(struct amd_iommu *iommu)
if (ret)
return ret;
+ iommu->int_enabled = true;
enable_faults:
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 7e2c445a1fae..f0adbc48fd17 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3854,6 +3854,9 @@ static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
struct amd_iommu *iommu;
int devid = -1;
+ if (!amd_iommu_irq_remap)
+ return 0;
+
if (x86_fwspec_is_ioapic(fwspec))
devid = get_ioapic_devid(fwspec->param[0]);
else if (x86_fwspec_is_hpet(fwspec))
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 5dff7ffbef11..bcda17012aee 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -196,6 +196,8 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
set_bit(qsmmu->bypass_cbndx, smmu->context_map);
+ arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);
+
reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
}
@@ -323,7 +325,9 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
}
static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
+ { .compatible = "qcom,msm8998-smmu-v2" },
{ .compatible = "qcom,sc7180-smmu-500" },
+ { .compatible = "qcom,sdm630-smmu-v2" },
{ .compatible = "qcom,sdm845-smmu-500" },
{ .compatible = "qcom,sm8150-smmu-500" },
{ .compatible = "qcom,sm8250-smmu-500" },
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f0305e6aac1b..4078358ed66e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -863,33 +863,6 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
int i, count = 0;
- /*
- * The Intel graphic driver is used to assume that the returned
- * sg list is not combound. This blocks the efforts of converting
- * Intel IOMMU driver to dma-iommu api's. Add this quirk to make the
- * device driver work and should be removed once it's fixed in i915
- * driver.
- */
- if (IS_ENABLED(CONFIG_DRM_I915) && dev_is_pci(dev) &&
- to_pci_dev(dev)->vendor == PCI_VENDOR_ID_INTEL &&
- (to_pci_dev(dev)->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
- for_each_sg(sg, s, nents, i) {
- unsigned int s_iova_off = sg_dma_address(s);
- unsigned int s_length = sg_dma_len(s);
- unsigned int s_iova_len = s->length;
-
- s->offset += s_iova_off;
- s->length = s_length;
- sg_dma_address(s) = dma_addr + s_iova_off;
- sg_dma_len(s) = s_length;
- dma_addr += s_iova_len;
-
- pr_info_once("sg combining disabled due to i915 driver\n");
- }
-
- return nents;
- }
-
for_each_sg(sg, s, nents, i) {
/* Restore this segment's original unaligned fields first */
unsigned int s_iova_off = sg_dma_address(s);
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index b46dbfa6d0ed..004feaed3c72 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1461,8 +1461,8 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
int mask = ilog2(__roundup_pow_of_two(npages));
unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
- if (WARN_ON_ONCE(!ALIGN(addr, align)))
- addr &= ~(align - 1);
+ if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+ addr = ALIGN_DOWN(addr, align);
desc.qw0 = QI_EIOTLB_PASID(pasid) |
QI_EIOTLB_DID(did) |
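
The qi_flush_piotlb() hunk replaces a predicate that could never fire: ALIGN() rounds up, so !ALIGN(addr, align) is true only for addr == 0, while IS_ALIGNED() checks the low bits and ALIGN_DOWN() performs the fixup. A userspace sketch with simplified macro definitions (assumed to mirror include/linux/align.h):

#include <stdio.h>

#define ALIGN(x, a)		(((x) + ((a) - 1)) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned long addr = 0x1234, align = 0x1000;

	printf("old predicate fires: %d\n", !ALIGN(addr, align));	/* 0: never warned */
	printf("new predicate fires: %d\n", !IS_ALIGNED(addr, align));	/* 1: misaligned */
	printf("fixed up address:    0x%lx\n", ALIGN_DOWN(addr, align));/* 0x1000 */
	return 0;
}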
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 788119c5b021..f665322a0991 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -38,7 +38,6 @@
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
-#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/crash_dump.h>
#include <linux/numa.h>
@@ -719,6 +718,8 @@ static int domain_update_device_node(struct dmar_domain *domain)
return nid;
}
+static void domain_update_iotlb(struct dmar_domain *domain);
+
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
@@ -744,6 +745,8 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
else
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
+
+ domain_update_iotlb(domain);
}
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@@ -1464,17 +1467,22 @@ static void domain_update_iotlb(struct dmar_domain *domain)
assert_spin_locked(&device_domain_lock);
- list_for_each_entry(info, &domain->devices, link) {
- struct pci_dev *pdev;
-
- if (!info->dev || !dev_is_pci(info->dev))
- continue;
-
- pdev = to_pci_dev(info->dev);
- if (pdev->ats_enabled) {
+ list_for_each_entry(info, &domain->devices, link)
+ if (info->ats_enabled) {
has_iotlb_device = true;
break;
}
+
+ if (!has_iotlb_device) {
+ struct subdev_domain_info *sinfo;
+
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ info = get_domain_info(sinfo->pdev);
+ if (info && info->ats_enabled) {
+ has_iotlb_device = true;
+ break;
+ }
+ }
}
domain->has_iotlb_device = has_iotlb_device;
@@ -1555,25 +1563,37 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
#endif
}
+static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
+ u64 addr, unsigned int mask)
+{
+ u16 sid, qdep;
+
+ if (!info || !info->ats_enabled)
+ return;
+
+ sid = info->bus << 8 | info->devfn;
+ qdep = info->ats_qdep;
+ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+ qdep, addr, mask);
+}
+
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
- u16 sid, qdep;
unsigned long flags;
struct device_domain_info *info;
+ struct subdev_domain_info *sinfo;
if (!domain->has_iotlb_device)
return;
spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry(info, &domain->devices, link) {
- if (!info->ats_enabled)
- continue;
+ list_for_each_entry(info, &domain->devices, link)
+ __iommu_flush_dev_iotlb(info, addr, mask);
- sid = info->bus << 8 | info->devfn;
- qdep = info->ats_qdep;
- qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
- qdep, addr, mask);
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ info = get_domain_info(sinfo->pdev);
+ __iommu_flush_dev_iotlb(info, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
}
@@ -1877,6 +1897,7 @@ static struct dmar_domain *alloc_domain(int flags)
domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
+ INIT_LIST_HEAD(&domain->subdevices);
return domain;
}
@@ -2547,7 +2568,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
info->iommu = iommu;
info->pasid_table = NULL;
info->auxd_enabled = 0;
- INIT_LIST_HEAD(&info->auxiliary_domains);
+ INIT_LIST_HEAD(&info->subdevices);
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -4475,33 +4496,61 @@ is_aux_domain(struct device *dev, struct iommu_domain *domain)
domain->type == IOMMU_DOMAIN_UNMANAGED;
}
-static void auxiliary_link_device(struct dmar_domain *domain,
- struct device *dev)
+static inline struct subdev_domain_info *
+lookup_subdev_info(struct dmar_domain *domain, struct device *dev)
+{
+ struct subdev_domain_info *sinfo;
+
+ if (!list_empty(&domain->subdevices)) {
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ if (sinfo->pdev == dev)
+ return sinfo;
+ }
+ }
+
+ return NULL;
+}
+
+static int auxiliary_link_device(struct dmar_domain *domain,
+ struct device *dev)
{
struct device_domain_info *info = get_domain_info(dev);
+ struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
assert_spin_locked(&device_domain_lock);
if (WARN_ON(!info))
- return;
+ return -EINVAL;
- domain->auxd_refcnt++;
- list_add(&domain->auxd, &info->auxiliary_domains);
+ if (!sinfo) {
+ sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
+ sinfo->domain = domain;
+ sinfo->pdev = dev;
+ list_add(&sinfo->link_phys, &info->subdevices);
+ list_add(&sinfo->link_domain, &domain->subdevices);
+ }
+
+ return ++sinfo->users;
}
-static void auxiliary_unlink_device(struct dmar_domain *domain,
- struct device *dev)
+static int auxiliary_unlink_device(struct dmar_domain *domain,
+ struct device *dev)
{
struct device_domain_info *info = get_domain_info(dev);
+ struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
+ int ret;
assert_spin_locked(&device_domain_lock);
- if (WARN_ON(!info))
- return;
+ if (WARN_ON(!info || !sinfo || sinfo->users <= 0))
+ return -EINVAL;
- list_del(&domain->auxd);
- domain->auxd_refcnt--;
+ ret = --sinfo->users;
+ if (!ret) {
+ list_del(&sinfo->link_phys);
+ list_del(&sinfo->link_domain);
+ kfree(sinfo);
+ }
- if (!domain->auxd_refcnt && domain->default_pasid > 0)
- ioasid_put(domain->default_pasid);
+ return ret;
}
static int aux_domain_add_dev(struct dmar_domain *domain,
@@ -4530,6 +4579,19 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
}
spin_lock_irqsave(&device_domain_lock, flags);
+ ret = auxiliary_link_device(domain, dev);
+ if (ret <= 0)
+ goto link_failed;
+
+ /*
+ * Subdevices from the same physical device can be attached to the
+ * same domain. For such cases, only the first subdevice attachment
+ * needs to go through the full steps in this function. So if ret >
+ * 1, just goto out.
+ */
+ if (ret > 1)
+ goto out;
+
/*
* iommu->lock must be held to attach domain to iommu and setup the
* pasid entry for second level translation.
@@ -4548,10 +4610,9 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
domain->default_pasid);
if (ret)
goto table_failed;
- spin_unlock(&iommu->lock);
-
- auxiliary_link_device(domain, dev);
+ spin_unlock(&iommu->lock);
+out:
spin_unlock_irqrestore(&device_domain_lock, flags);
return 0;
@@ -4560,8 +4621,10 @@ table_failed:
domain_detach_iommu(domain, iommu);
attach_failed:
spin_unlock(&iommu->lock);
+ auxiliary_unlink_device(domain, dev);
+link_failed:
spin_unlock_irqrestore(&device_domain_lock, flags);
- if (!domain->auxd_refcnt && domain->default_pasid > 0)
+ if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
ioasid_put(domain->default_pasid);
return ret;
@@ -4581,14 +4644,18 @@ static void aux_domain_remove_dev(struct dmar_domain *domain,
info = get_domain_info(dev);
iommu = info->iommu;
- auxiliary_unlink_device(domain, dev);
-
- spin_lock(&iommu->lock);
- intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
- domain_detach_iommu(domain, iommu);
- spin_unlock(&iommu->lock);
+ if (!auxiliary_unlink_device(domain, dev)) {
+ spin_lock(&iommu->lock);
+ intel_pasid_tear_down_entry(iommu, dev,
+ domain->default_pasid, false);
+ domain_detach_iommu(domain, iommu);
+ spin_unlock(&iommu->lock);
+ }
spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
+ ioasid_put(domain->default_pasid);
}
static int prepare_domain_attach_device(struct iommu_domain *domain,
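
auxiliary_link_device()/auxiliary_unlink_device() now keep a per-subdevice user count, so the PASID setup runs only for the first subdevice of a physical device and the teardown only for the last. A userspace sketch of that first/last gating (names are illustrative, not the driver's symbols):

#include <stdio.h>

struct subdev_info { int users; };

static int link_subdev(struct subdev_info *s)
{
	return ++s->users;	/* caller does the full setup only when this is 1 */
}

static int unlink_subdev(struct subdev_info *s)
{
	return --s->users;	/* caller tears down only when this reaches 0 */
}

int main(void)
{
	struct subdev_info s = { 0 };

	if (link_subdev(&s) == 1)
		printf("first attach: set up the PASID entry\n");
	if (link_subdev(&s) > 1)
		printf("second attach: reuse the existing setup\n");
	if (unlink_subdev(&s) > 0)
		printf("detach with users left: keep the PASID entry\n");
	if (unlink_subdev(&s) == 0)
		printf("last detach: tear down the PASID entry\n");
	return 0;
}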
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index aeffda92b10b..685200a5cff0 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1353,6 +1353,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
irq_data = irq_domain_get_irq_data(domain, virq + i);
irq_cfg = irqd_cfg(irq_data);
if (!irq_data || !irq_cfg) {
+ if (!i)
+ kfree(data);
ret = -EINVAL;
goto out_free_data;
}
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 4fa248b98031..18a9f05df407 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -118,8 +118,10 @@ void intel_svm_check(struct intel_iommu *iommu)
iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}
-static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
- unsigned long address, unsigned long pages, int ih)
+static void __flush_svm_range_dev(struct intel_svm *svm,
+ struct intel_svm_dev *sdev,
+ unsigned long address,
+ unsigned long pages, int ih)
{
struct qi_desc desc;
@@ -142,7 +144,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(svm->iommu, &desc, 1, 0);
+ qi_submit_sync(sdev->iommu, &desc, 1, 0);
if (sdev->dev_iotlb) {
desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
@@ -166,7 +168,23 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(svm->iommu, &desc, 1, 0);
+ qi_submit_sync(sdev->iommu, &desc, 1, 0);
+ }
+}
+
+static void intel_flush_svm_range_dev(struct intel_svm *svm,
+ struct intel_svm_dev *sdev,
+ unsigned long address,
+ unsigned long pages, int ih)
+{
+ unsigned long shift = ilog2(__roundup_pow_of_two(pages));
+ unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
+ unsigned long start = ALIGN_DOWN(address, align);
+ unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);
+
+ while (start < end) {
+ __flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
+ start += align;
}
}
@@ -211,7 +229,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
*/
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list)
- intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
+ intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
svm->pasid, true);
rcu_read_unlock();
@@ -281,6 +299,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct dmar_domain *dmar_domain;
struct device_domain_info *info;
struct intel_svm *svm = NULL;
+ unsigned long iflags;
int ret = 0;
if (WARN_ON(!iommu) || !data)
@@ -363,6 +382,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
}
sdev->dev = dev;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
+ sdev->iommu = iommu;
/* Only count users if device has aux domains */
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
@@ -381,12 +401,12 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
* each bind of a new device even with an existing PASID, we need to
* call the nested mode setup function here.
*/
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_nested(iommu, dev,
(pgd_t *)(uintptr_t)data->gpgd,
data->hpasid, &data->vendor.vtd, dmar_domain,
data->addr_width);
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
data->hpasid, ret);
@@ -486,6 +506,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
struct device_domain_info *info;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
+ unsigned long iflags;
int pasid_max;
int ret;
@@ -546,6 +567,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
goto out;
}
sdev->dev = dev;
+ sdev->iommu = iommu;
ret = intel_iommu_enable_pasid(iommu, dev);
if (ret) {
@@ -575,7 +597,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
kfree(sdev);
goto out;
}
- svm->iommu = iommu;
if (pasid_max > intel_pasid_max_id)
pasid_max = intel_pasid_max_id;
@@ -605,14 +626,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
}
}
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
(cpu_feature_enabled(X86_FEATURE_LA57) ?
PASID_FLAG_FL5LP : 0));
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
if (mm)
mmu_notifier_unregister(&svm->notifier, mm);
@@ -632,14 +653,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
* Binding a new device with existing PASID, need to setup
* the PASID entry.
*/
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
(cpu_feature_enabled(X86_FEATURE_LA57) ?
PASID_FLAG_FL5LP : 0));
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
kfree(sdev);
goto out;
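
The new intel_flush_svm_range_dev() wrapper splits an arbitrary (address, pages) invalidation into naturally aligned power-of-two blocks before calling __flush_svm_range_dev(), so each flush covers an aligned, power-of-two range as the descriptor format expects. A minimal userspace sketch of the same arithmetic (PAGE_SHIFT and the helpers are simplified stand-ins):

#include <stdio.h>

#define PAGE_SHIFT	12UL

static unsigned long roundup_pow_of_two_ul(unsigned long x)
{
	unsigned long r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

static void flush_one(unsigned long start, unsigned long pages)
{
	printf("flush 0x%lx + %lu pages\n", start, pages);
}

static void flush_range(unsigned long address, unsigned long pages)
{
	unsigned long align = roundup_pow_of_two_ul(pages) << PAGE_SHIFT;
	unsigned long start = address & ~(align - 1);
	unsigned long end = (address + (pages << PAGE_SHIFT) + align - 1) & ~(align - 1);

	while (start < end) {
		flush_one(start, align >> PAGE_SHIFT);
		start += align;
	}
}

int main(void)
{
	/* 3 pages starting near an alignment boundary -> two 4-page flushes */
	flush_range(0x7000, 3);
	return 0;
}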
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 4bb3293ae4d7..d20b8b333d30 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -358,7 +358,7 @@ static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
* @iovad: - iova domain in question.
* @pfn: - page frame number
* This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
+ * given domain which matches the given pfn.
*/
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
@@ -601,7 +601,7 @@ void queue_iova(struct iova_domain *iovad,
EXPORT_SYMBOL_GPL(queue_iova);
/**
- * put_iova_domain - destroys the iova doamin
+ * put_iova_domain - destroys the iova domain
* @iovad: - iova domain in question.
* All the iova's in that domain are destroyed.
*/
@@ -712,9 +712,9 @@ EXPORT_SYMBOL_GPL(reserve_iova);
/**
* copy_reserved_iova - copies the reserved between domains
- * @from: - source doamin from where to copy
+ * @from: - source domain from where to copy
* @to: - destination domin where to copy
- * This function copies reserved iova's from one doamin to
+ * This function copies reserved iova's from one domain to
* other.
*/
void
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 94920a51c628..b147f22a78f4 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -493,8 +493,9 @@ config TI_SCI_INTA_IRQCHIP
TI System Controller, say Y here. Otherwise, say N.
config TI_PRUSS_INTC
- tristate "TI PRU-ICSS Interrupt Controller"
- depends on ARCH_DAVINCI || SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3
+ tristate
+ depends on TI_PRUSS
+ default TI_PRUSS
select IRQ_DOMAIN
help
This enables support for the PRU-ICSS Local Interrupt Controller
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 5f5eb8877c41..25c9a9c06e41 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -167,7 +167,7 @@ static void bcm2836_arm_irqchip_handle_ipi(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void bcm2836_arm_irqchip_ipi_eoi(struct irq_data *d)
+static void bcm2836_arm_irqchip_ipi_ack(struct irq_data *d)
{
int cpu = smp_processor_id();
@@ -195,7 +195,7 @@ static struct irq_chip bcm2836_arm_irqchip_ipi = {
.name = "IPI",
.irq_mask = bcm2836_arm_irqchip_dummy_op,
.irq_unmask = bcm2836_arm_irqchip_dummy_op,
- .irq_eoi = bcm2836_arm_irqchip_ipi_eoi,
+ .irq_ack = bcm2836_arm_irqchip_ipi_ack,
.ipi_send_mask = bcm2836_arm_irqchip_ipi_send_mask,
};
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index 9ed1bc473663..09b91b81851c 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -142,8 +142,8 @@ static void liointc_resume(struct irq_chip_generic *gc)
static const char * const parent_names[] = {"int0", "int1", "int2", "int3"};
-int __init liointc_of_init(struct device_node *node,
- struct device_node *parent)
+static int __init liointc_of_init(struct device_node *node,
+ struct device_node *parent)
{
struct irq_chip_generic *gc;
struct irq_domain *domain;
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index 95d4fd8f7a96..0bbb0b2d0dd5 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -197,6 +197,13 @@ static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
if (ret)
return ret;
+ ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq,
+ &mips_mt_cpu_irq_controller,
+ NULL);
+
+ if (ret)
+ return ret;
+
ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
if (ret)
return ret;
diff --git a/drivers/irqchip/irq-sl28cpld.c b/drivers/irqchip/irq-sl28cpld.c
index 0aa50d025ef6..fbb354413ffa 100644
--- a/drivers/irqchip/irq-sl28cpld.c
+++ b/drivers/irqchip/irq-sl28cpld.c
@@ -66,7 +66,7 @@ static int sl28cpld_intc_probe(struct platform_device *pdev)
irqchip->chip.num_regs = 1;
irqchip->chip.status_base = base + INTC_IP;
irqchip->chip.mask_base = base + INTC_IE;
- irqchip->chip.mask_invert = true,
+ irqchip->chip.mask_invert = true;
irqchip->chip.ack_base = base + INTC_IP;
return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
diff --git a/drivers/isdn/mISDN/Kconfig b/drivers/isdn/mISDN/Kconfig
index 26cf0ac9c4ad..c9a53c222472 100644
--- a/drivers/isdn/mISDN/Kconfig
+++ b/drivers/isdn/mISDN/Kconfig
@@ -13,6 +13,7 @@ if MISDN != n
config MISDN_DSP
tristate "Digital Audio Processing of transparent data"
depends on MISDN
+ select BITREVERSE
help
Enable support for digital audio processing capability.
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 8f39f9ba5c80..4c2ce210c123 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -19,6 +19,7 @@ if NVM
config NVM_PBLK
tristate "Physical Block Device Open-Channel SSD target"
+ select CRC32
help
Allows an open-channel SSD to be exposed as a block device to the
host. The target assumes the device exposes raw flash and must be
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index c1bcac71008c..28ddcaa5358b 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -844,11 +844,10 @@ static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
ret = nvm_submit_io_sync_raw(dev, &rqd);
+ __free_page(page);
if (ret)
return ret;
- __free_page(page);
-
return rqd.error;
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index b7e2d9666614..9e44c09f6410 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -605,6 +605,7 @@ config DM_INTEGRITY
select BLK_DEV_INTEGRITY
select DM_BUFIO
select CRYPTO
+ select CRYPTO_SKCIPHER
select ASYNC_XOR
help
This device-mapper target emulates a block device that has
@@ -622,6 +623,7 @@ config DM_ZONED
tristate "Drive-managed zoned block device target support"
depends on BLK_DEV_DM
depends on BLK_DEV_ZONED
+ select CRC32
help
This device-mapper target takes a host-managed or host-aware zoned
block device and exposes most of its capacity as a regular block
diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
index 6469223f0b77..d636b7b2d070 100644
--- a/drivers/md/bcache/features.c
+++ b/drivers/md/bcache/features.c
@@ -17,7 +17,7 @@ struct feature {
};
static struct feature feature_list[] = {
- {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LARGE_BUCKET,
+ {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE,
"large_bucket"},
{0, 0, 0 },
};
diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h
index a1653c478041..84fc2c0f0101 100644
--- a/drivers/md/bcache/features.h
+++ b/drivers/md/bcache/features.h
@@ -13,11 +13,15 @@
/* Feature set definition */
/* Incompat feature set */
-#define BCH_FEATURE_INCOMPAT_LARGE_BUCKET 0x0001 /* 32bit bucket size */
+/* 32bit bucket size, obsoleted */
+#define BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET 0x0001
+/* real bucket size is (1 << bucket_size) */
+#define BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE 0x0002
-#define BCH_FEATURE_COMPAT_SUUP 0
-#define BCH_FEATURE_RO_COMPAT_SUUP 0
-#define BCH_FEATURE_INCOMPAT_SUUP BCH_FEATURE_INCOMPAT_LARGE_BUCKET
+#define BCH_FEATURE_COMPAT_SUPP 0
+#define BCH_FEATURE_RO_COMPAT_SUPP 0
+#define BCH_FEATURE_INCOMPAT_SUPP (BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET| \
+ BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE)
#define BCH_HAS_COMPAT_FEATURE(sb, mask) \
((sb)->feature_compat & (mask))
@@ -77,7 +81,23 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
~BCH##_FEATURE_INCOMPAT_##flagname; \
}
-BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(obso_large_bucket, OBSO_LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LOG_LARGE_BUCKET_SIZE);
+
+static inline bool bch_has_unknown_compat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_compat & ~BCH_FEATURE_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_ro_compat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_ro_compat & ~BCH_FEATURE_RO_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_incompat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_incompat & ~BCH_FEATURE_INCOMPAT_SUPP) != 0);
+}
int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size);
int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size);
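
The helpers above let the superblock reader reject cache devices that advertise feature bits this kernel does not know about, by masking against the *_SUPP sets. A userspace sketch of the same mask test (the constant below stands in for BCH_FEATURE_INCOMPAT_SUPP):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INCOMPAT_SUPP	(0x0001ULL | 0x0002ULL)	/* OBSO_LARGE_BUCKET | LOG_LARGE_BUCKET_SIZE */

static bool has_unknown_incompat(uint64_t feature_incompat)
{
	return (feature_incompat & ~INCOMPAT_SUPP) != 0;
}

int main(void)
{
	printf("%d\n", has_unknown_incompat(0x0002));	/* 0: known feature, mount proceeds */
	printf("%d\n", has_unknown_incompat(0x0004));	/* 1: unknown feature, refuse */
	return 0;
}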
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index a4752ac410dc..2047a9cccdb5 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -64,9 +64,25 @@ static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s
{
unsigned int bucket_size = le16_to_cpu(s->bucket_size);
- if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES &&
- bch_has_feature_large_bucket(sb))
- bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16;
+ if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+ if (bch_has_feature_large_bucket(sb)) {
+ unsigned int max, order;
+
+ max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
+ order = le16_to_cpu(s->bucket_size);
+ /*
+ * the bcache tools will make sure the overflow won't
+ * happen; an error message here is enough.
+ */
+ if (order > max)
+ pr_err("Bucket size (1 << %u) overflows\n",
+ order);
+ bucket_size = 1 << order;
+ } else if (bch_has_feature_obso_large_bucket(sb)) {
+ bucket_size +=
+ le16_to_cpu(s->obso_bucket_size_hi) << 16;
+ }
+ }
return bucket_size;
}
@@ -228,6 +244,20 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
sb->feature_compat = le64_to_cpu(s->feature_compat);
sb->feature_incompat = le64_to_cpu(s->feature_incompat);
sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
+
+ /* Check incompatible features */
+ err = "Unsupported compatible feature found";
+ if (bch_has_unknown_compat_features(sb))
+ goto err;
+
+ err = "Unsupported read-only compatible feature found";
+ if (bch_has_unknown_ro_compat_features(sb))
+ goto err;
+
+ err = "Unsupported incompatible feature found";
+ if (bch_has_unknown_incompat_features(sb))
+ goto err;
+
err = read_super_common(sb, bdev, s);
if (err)
goto err;
@@ -1302,6 +1332,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
bcache_device_link(&dc->disk, c, "bdev");
atomic_inc(&c->attached_dev_nr);
+ if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
+ pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+ pr_err("Please update to the latest bcache-tools to create the cache device\n");
+ set_disk_ro(dc->disk.disk, 1);
+ }
+
/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
@@ -1524,6 +1560,12 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
bcache_device_link(d, c, "volume");
+ if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
+ pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+ pr_err("Please update to the latest bcache-tools to create the cache device\n");
+ set_disk_ro(d->disk, 1);
+ }
+
return 0;
err:
kobject_put(&d->kobj);
@@ -2083,6 +2125,9 @@ static int run_cache_set(struct cache_set *c)
c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
bcache_write_super(c);
+ if (bch_has_feature_obso_large_bucket(&c->cache->sb))
+ pr_err("Detect obsoleted large bucket layout, all attached bcache device will be read-only\n");
+
list_for_each_entry_safe(dc, t, &uncached_devices, list)
bch_cached_dev_attach(dc, c, NULL);
@@ -2644,8 +2689,8 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
}
list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+ char *pdev_set_uuid = pdev->dc->sb.set_uuid;
list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
- char *pdev_set_uuid = pdev->dc->sb.set_uuid;
char *set_uuid = c->set_uuid;
if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
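
The get_bucket_size() rework above distinguishes two on-disk encodings: the new log encoding, where the stored value is an order and the real size is 1 << order, and the obsoleted split encoding that keeps the high 16 bits in a separate field. A small userspace sketch of both decodings (values and helper names are illustrative):

#include <stdint.h>
#include <stdio.h>

static unsigned int bucket_size_log(uint16_t order)
{
	unsigned int max = sizeof(unsigned int) * 8 - 1;

	if (order > max) {	/* the shift would overflow unsigned int */
		fprintf(stderr, "Bucket size (1 << %u) overflows\n", order);
		return 0;
	}
	return 1u << order;
}

static unsigned int bucket_size_obso(uint16_t lo, uint16_t hi)
{
	return (unsigned int)lo | ((unsigned int)hi << 16);
}

int main(void)
{
	printf("%u\n", bucket_size_log(10));	/* 1024 sectors */
	printf("%u\n", bucket_size_obso(0, 2));	/* 131072 sectors */
	return 0;
}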
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 9c1a86bde658..fce4cbf9529d 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1534,6 +1534,12 @@ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
+{
+ return c->dm_io;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
+
sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
return b->block;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 5f9f9b3a226d..5a55617a08e6 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1454,13 +1454,16 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
static void kcryptd_async_done(struct crypto_async_request *async_req,
int error);
-static void crypt_alloc_req_skcipher(struct crypt_config *cc,
+static int crypt_alloc_req_skcipher(struct crypt_config *cc,
struct convert_context *ctx)
{
unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
- if (!ctx->r.req)
- ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
+ if (!ctx->r.req) {
+ ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+ if (!ctx->r.req)
+ return -ENOMEM;
+ }
skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
@@ -1471,13 +1474,18 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
skcipher_request_set_callback(ctx->r.req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
+
+ return 0;
}
-static void crypt_alloc_req_aead(struct crypt_config *cc,
+static int crypt_alloc_req_aead(struct crypt_config *cc,
struct convert_context *ctx)
{
- if (!ctx->r.req_aead)
- ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
+ if (!ctx->r.req_aead) {
+ ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+ if (!ctx->r.req_aead)
+ return -ENOMEM;
+ }
aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
@@ -1488,15 +1496,17 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
aead_request_set_callback(ctx->r.req_aead,
CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
+
+ return 0;
}
-static void crypt_alloc_req(struct crypt_config *cc,
+static int crypt_alloc_req(struct crypt_config *cc,
struct convert_context *ctx)
{
if (crypt_integrity_aead(cc))
- crypt_alloc_req_aead(cc, ctx);
+ return crypt_alloc_req_aead(cc, ctx);
else
- crypt_alloc_req_skcipher(cc, ctx);
+ return crypt_alloc_req_skcipher(cc, ctx);
}
static void crypt_free_req_skcipher(struct crypt_config *cc,
@@ -1529,17 +1539,28 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
* Encrypt / decrypt data from one bio to another one (can be the same one)
*/
static blk_status_t crypt_convert(struct crypt_config *cc,
- struct convert_context *ctx, bool atomic)
+ struct convert_context *ctx, bool atomic, bool reset_pending)
{
unsigned int tag_offset = 0;
unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
int r;
- atomic_set(&ctx->cc_pending, 1);
+ /*
+ * If reset_pending is set, we are dealing with the bio for the first time;
+ * otherwise we're continuing to work on the previous bio, so don't touch
+ * the cc_pending counter.
+ */
+ if (reset_pending)
+ atomic_set(&ctx->cc_pending, 1);
while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
- crypt_alloc_req(cc, ctx);
+ r = crypt_alloc_req(cc, ctx);
+ if (r) {
+ complete(&ctx->restart);
+ return BLK_STS_DEV_RESOURCE;
+ }
+
atomic_inc(&ctx->cc_pending);
if (crypt_integrity_aead(cc))
@@ -1553,7 +1574,25 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
* but the driver request queue is full, let's wait.
*/
case -EBUSY:
- wait_for_completion(&ctx->restart);
+ if (in_interrupt()) {
+ if (try_wait_for_completion(&ctx->restart)) {
+ /*
+ * we don't have to block to wait for completion,
+ * so proceed
+ */
+ } else {
+ /*
+ * we can't wait for completion without blocking
+ * exit and continue processing in a workqueue
+ */
+ ctx->r.req = NULL;
+ ctx->cc_sector += sector_step;
+ tag_offset++;
+ return BLK_STS_DEV_RESOURCE;
+ }
+ } else {
+ wait_for_completion(&ctx->restart);
+ }
reinit_completion(&ctx->restart);
fallthrough;
/*
@@ -1691,6 +1730,12 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
atomic_inc(&io->io_pending);
}
+static void kcryptd_io_bio_endio(struct work_struct *work)
+{
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+ bio_endio(io->base_bio);
+}
+
/*
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
@@ -1713,7 +1758,23 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
kfree(io->integrity_metadata);
base_bio->bi_status = error;
- bio_endio(base_bio);
+
+ /*
+ * If we are running this function from our tasklet,
+ * we can't call bio_endio() here, because it will call
+ * clone_endio() from dm.c, which in turn will free the
+ * current struct dm_crypt_io structure, and our tasklet
+ * along with it. In this case we need to delay bio_endio()
+ * execution until after the tasklet is done and dequeued.
+ */
+ if (tasklet_trylock(&io->tasklet)) {
+ tasklet_unlock(&io->tasklet);
+ bio_endio(base_bio);
+ return;
+ }
+
+ INIT_WORK(&io->work, kcryptd_io_bio_endio);
+ queue_work(cc->io_queue, &io->work);
}
/*
@@ -1945,6 +2006,37 @@ static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
}
}
+static void kcryptd_crypt_write_continue(struct work_struct *work)
+{
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+ struct crypt_config *cc = io->cc;
+ struct convert_context *ctx = &io->ctx;
+ int crypt_finished;
+ sector_t sector = io->sector;
+ blk_status_t r;
+
+ wait_for_completion(&ctx->restart);
+ reinit_completion(&ctx->restart);
+
+ r = crypt_convert(cc, &io->ctx, true, false);
+ if (r)
+ io->error = r;
+ crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
+ if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
+ /* Wait for completion signaled by kcryptd_async_done() */
+ wait_for_completion(&ctx->restart);
+ crypt_finished = 1;
+ }
+
+ /* Encryption was already finished, submit io now */
+ if (crypt_finished) {
+ kcryptd_crypt_write_io_submit(io, 0);
+ io->sector = sector;
+ }
+
+ crypt_dec_pending(io);
+}
+
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->cc;
@@ -1973,7 +2065,17 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
crypt_inc_pending(io);
r = crypt_convert(cc, ctx,
- test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags));
+ test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
+ /*
+ * Crypto API backlogged the request, because its queue was full
+ * and we're in softirq context, so continue from a workqueue
+ * (TODO: is it actually possible to be in softirq in the write path?)
+ */
+ if (r == BLK_STS_DEV_RESOURCE) {
+ INIT_WORK(&io->work, kcryptd_crypt_write_continue);
+ queue_work(cc->crypt_queue, &io->work);
+ return;
+ }
if (r)
io->error = r;
crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
@@ -1998,6 +2100,25 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
crypt_dec_pending(io);
}
+static void kcryptd_crypt_read_continue(struct work_struct *work)
+{
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+ struct crypt_config *cc = io->cc;
+ blk_status_t r;
+
+ wait_for_completion(&io->ctx.restart);
+ reinit_completion(&io->ctx.restart);
+
+ r = crypt_convert(cc, &io->ctx, true, false);
+ if (r)
+ io->error = r;
+
+ if (atomic_dec_and_test(&io->ctx.cc_pending))
+ kcryptd_crypt_read_done(io);
+
+ crypt_dec_pending(io);
+}
+
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->cc;
@@ -2009,7 +2130,16 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
io->sector);
r = crypt_convert(cc, &io->ctx,
- test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags));
+ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ /*
+ * Crypto API backlogged the request, because its queue was full
+ * and we're in softirq context, so continue from a workqueue
+ */
+ if (r == BLK_STS_DEV_RESOURCE) {
+ INIT_WORK(&io->work, kcryptd_crypt_read_continue);
+ queue_work(cc->crypt_queue, &io->work);
+ return;
+ }
if (r)
io->error = r;
@@ -2091,8 +2221,12 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
(bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
- if (in_irq()) {
- /* Crypto API's "skcipher_walk_first() refuses to work in hard IRQ context */
+ /*
+ * in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
+ * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
+ * it is being executed with irqs disabled.
+ */
+ if (in_irq() || irqs_disabled()) {
tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
tasklet_schedule(&io->tasklet);
return;
@@ -3166,12 +3300,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
- cc->crypt_queue = alloc_workqueue("kcryptd-%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
1, devname);
else
- cc->crypt_queue = alloc_workqueue("kcryptd-%s",
- WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM |
- WQ_UNBOUND | WQ_SYSFS,
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s",
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
num_online_cpus(), devname);
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 5a7a1b90e671..b64fede032dc 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -257,8 +257,9 @@ struct dm_integrity_c {
bool journal_uptodate;
bool just_formatted;
bool recalculate_flag;
- bool fix_padding;
bool discard;
+ bool fix_padding;
+ bool legacy_recalculate;
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
@@ -386,6 +387,14 @@ static int dm_integrity_failed(struct dm_integrity_c *ic)
return READ_ONCE(ic->failed);
}
+static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
+{
+ if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) &&
+ !ic->legacy_recalculate)
+ return true;
+ return false;
+}
+
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
unsigned j, unsigned char seq)
{
@@ -1379,12 +1388,52 @@ thorough_test:
#undef MAY_BE_HASH
}
-static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
+struct flush_request {
+ struct dm_io_request io_req;
+ struct dm_io_region io_reg;
+ struct dm_integrity_c *ic;
+ struct completion comp;
+};
+
+static void flush_notify(unsigned long error, void *fr_)
+{
+ struct flush_request *fr = fr_;
+ if (unlikely(error != 0))
+ dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
+ complete(&fr->comp);
+}
+
+static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
{
int r;
+
+ struct flush_request fr;
+
+ if (!ic->meta_dev)
+ flush_data = false;
+ if (flush_data) {
+ fr.io_req.bi_op = REQ_OP_WRITE,
+ fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+ fr.io_req.mem.type = DM_IO_KMEM,
+ fr.io_req.mem.ptr.addr = NULL,
+ fr.io_req.notify.fn = flush_notify,
+ fr.io_req.notify.context = &fr;
+ fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
+ fr.io_reg.bdev = ic->dev->bdev,
+ fr.io_reg.sector = 0,
+ fr.io_reg.count = 0,
+ fr.ic = ic;
+ init_completion(&fr.comp);
+ r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
+ BUG_ON(r);
+ }
+
r = dm_bufio_write_dirty_buffers(ic->bufio);
if (unlikely(r))
dm_integrity_io_error(ic, "writing tags", r);
+
+ if (flush_data)
+ wait_for_completion(&fr.comp);
}
static void sleep_on_endio_wait(struct dm_integrity_c *ic)
@@ -2110,7 +2159,7 @@ offload_to_thread:
if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
integrity_metadata(&dio->work);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, false);
dio->in_flight = (atomic_t)ATOMIC_INIT(1);
dio->completion = NULL;
@@ -2195,7 +2244,7 @@ static void integrity_commit(struct work_struct *w)
flushes = bio_list_get(&ic->flush_bio_list);
if (unlikely(ic->mode != 'J')) {
spin_unlock_irq(&ic->endio_wait.lock);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
goto release_flush_bios;
}
@@ -2409,7 +2458,7 @@ skip_io:
complete_journal_op(&comp);
wait_for_completion_io(&comp.comp);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
}
static void integrity_writer(struct work_struct *w)
@@ -2451,7 +2500,7 @@ static void recalc_write_super(struct dm_integrity_c *ic)
{
int r;
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, false);
if (dm_integrity_failed(ic))
return;
@@ -2654,7 +2703,7 @@ static void bitmap_flush_work(struct work_struct *work)
unsigned long limit;
struct bio *bio;
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, false);
range.logical_sector = 0;
range.n_sectors = ic->provided_data_sectors;
@@ -2663,9 +2712,7 @@ static void bitmap_flush_work(struct work_struct *work)
add_new_range_and_wait(ic, &range);
spin_unlock_irq(&ic->endio_wait.lock);
- dm_integrity_flush_buffers(ic);
- if (ic->meta_dev)
- blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
+ dm_integrity_flush_buffers(ic, true);
limit = ic->provided_data_sectors;
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
@@ -2934,11 +2981,11 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
if (ic->meta_dev)
queue_work(ic->writer_wq, &ic->writer_work);
drain_workqueue(ic->writer_wq);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
}
if (ic->mode == 'B') {
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
#if 1
/* set to 0 to test bitmap replay code */
init_journal(ic, 0, ic->journal_sections, 0);
@@ -3102,6 +3149,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
+ arg_count += ic->legacy_recalculate;
DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
ic->tag_size, ic->mode, arg_count);
if (ic->meta_dev)
@@ -3125,6 +3173,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
}
if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
DMEMIT(" fix_padding");
+ if (ic->legacy_recalculate)
+ DMEMIT(" legacy_recalculate");
#define EMIT_ALG(a, n) \
do { \
@@ -3754,7 +3804,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
unsigned extra_args;
struct dm_arg_set as;
static const struct dm_arg _args[] = {
- {0, 9, "Invalid number of feature args"},
+ {0, 16, "Invalid number of feature args"},
};
unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
bool should_write_sb;
@@ -3902,6 +3952,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ic->discard = true;
} else if (!strcmp(opt_string, "fix_padding")) {
ic->fix_padding = true;
+ } else if (!strcmp(opt_string, "legacy_recalculate")) {
+ ic->legacy_recalculate = true;
} else {
r = -EINVAL;
ti->error = "Invalid argument";
@@ -4197,6 +4249,20 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
+ } else {
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+ ti->error = "Recalculate can only be specified with internal_hash";
+ r = -EINVAL;
+ goto bad;
+ }
+ }
+
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+ le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
+ dm_integrity_disable_recalculate(ic)) {
+ ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
+ r = -EOPNOTSUPP;
+ goto bad;
}
ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 23c38777e8f6..cab12b2251ba 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3729,10 +3729,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
/*
- * RAID1 and RAID10 personalities require bio splitting,
- * RAID0/4/5/6 don't and process large discard bios properly.
+ * RAID0 and RAID10 personalities require bio splitting,
+ * RAID1/4/5/6 don't and process large discard bios properly.
*/
- if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
+ if (rs_is_raid0(rs) || rs_is_raid10(rs)) {
limits->discard_granularity = chunk_size_bytes;
limits->max_discard_sectors = rs->md.chunk_sectors;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 4668b2cd98f4..11890db71f3f 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -141,6 +141,11 @@ struct dm_snapshot {
* for them to be committed.
*/
struct bio_list bios_queued_during_merge;
+
+ /*
+ * Flush data after merge.
+ */
+ struct bio flush_bio;
};
/*
@@ -1121,6 +1126,17 @@ shut:
static void error_bios(struct bio *bio);
+static int flush_data(struct dm_snapshot *s)
+{
+ struct bio *flush_bio = &s->flush_bio;
+
+ bio_reset(flush_bio);
+ bio_set_dev(flush_bio, s->origin->bdev);
+ flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+
+ return submit_bio_wait(flush_bio);
+}
+
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
struct dm_snapshot *s = context;
@@ -1134,6 +1150,11 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
goto shut;
}
+ if (flush_data(s) < 0) {
+ DMERR("Flush after merge failed: shutting down merge");
+ goto shut;
+ }
+
if (s->store->type->commit_merge(s->store,
s->num_merging_chunks) < 0) {
DMERR("Write error in exception store: shutting down merge");
@@ -1318,6 +1339,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->first_merging_chunk = 0;
s->num_merging_chunks = 0;
bio_list_init(&s->bios_queued_during_merge);
+ bio_init(&s->flush_bio, NULL, 0);
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
@@ -1504,6 +1526,8 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
+ bio_uninit(&s->flush_bio);
+
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 188f41287f18..4acf2342f7ad 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -363,14 +363,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
{
int r;
dev_t dev;
+ unsigned int major, minor;
+ char dummy;
struct dm_dev_internal *dd;
struct dm_table *t = ti->table;
BUG_ON(!t);
- dev = dm_get_dev_t(path);
- if (!dev)
- return -ENODEV;
+ if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
+ /* Extract the major/minor numbers */
+ dev = MKDEV(major, minor);
+ if (MAJOR(dev) != major || MINOR(dev) != minor)
+ return -EOVERFLOW;
+ } else {
+ dev = dm_get_dev_t(path);
+ if (!dev)
+ return -ENODEV;
+ }
dd = find_device(&t->devices, dev);
if (!dd) {
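
dm_get_device() now accepts a bare "major:minor" string and validates that both numbers survive the dev_t encoding. A userspace sketch of that parsing and overflow check (the MKDEV/MAJOR/MINOR macros below only mimic the kernel's 20-bit-minor encoding):

#include <stdio.h>

#define MINORBITS	20
#define MINORMASK	((1u << MINORBITS) - 1)
#define MKDEV(ma, mi)	(((ma) << MINORBITS) | (mi))
#define MAJOR(dev)	((unsigned int)((dev) >> MINORBITS))
#define MINOR(dev)	((unsigned int)((dev) & MINORMASK))

static int parse_devt(const char *path, unsigned long long *out)
{
	unsigned int major, minor;
	char dummy;

	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) != 2)
		return -1;			/* not a bare major:minor string */

	unsigned long long dev = MKDEV((unsigned long long)major, minor);

	if (MAJOR(dev) != major || MINOR(dev) != minor)
		return -2;			/* -EOVERFLOW in the kernel */
	*out = dev;
	return 0;
}

int main(void)
{
	unsigned long long dev;

	if (!parse_devt("253:7", &dev))
		printf("dev_t 0x%llx (major %u, minor %u)\n", dev, MAJOR(dev), MINOR(dev));
	printf("overflow check: %d\n", parse_devt("4096:1048576", &dev));
	return 0;
}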
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b3c3c8b4cb42..7bac564f3faa 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -562,7 +562,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
* subset of the parent bdev; require extra privileges.
*/
if (!capable(CAP_SYS_RAWIO)) {
- DMWARN_LIMIT(
+ DMDEBUG_LIMIT(
"%s: sending ioctl %x to DM device without required privilege.",
current->comm, cmd);
r = -ENOIOCTLCMD;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ca409428b4fc..04384452a7ab 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -639,8 +639,10 @@ static void md_submit_flush_data(struct work_struct *ws)
* could wait for this and below md_handle_request could wait for those
* bios because of suspend check
*/
+ spin_lock_irq(&mddev->lock);
mddev->prev_flush_start = mddev->start_flush;
mddev->flush_bio = NULL;
+ spin_unlock_irq(&mddev->lock);
wake_up(&mddev->sb_wait);
if (bio->bi_iter.bi_size == 0) {
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 2aa6648fa41f..5a491d2cd1ae 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1512,6 +1512,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
struct pcr_handle *handle;
u32 base, len;
int ret, i, bar = 0;
+ u8 val;
dev_dbg(&(pcidev->dev),
": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
@@ -1577,7 +1578,11 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
-
+ rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
+ if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
+ pcr->aspm_enabled = false;
+ else
+ pcr->aspm_enabled = true;
pcr->card_inserted = 0;
pcr->card_removed = 0;
INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index beb482310a58..b2b3d2b0f808 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -472,8 +472,11 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cntr = &hdev->aggregated_cs_counters;
cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
- if (!cs)
+ if (!cs) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
return -ENOMEM;
+ }
cs->ctx = ctx;
cs->submitted = false;
@@ -486,6 +489,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
if (!cs_cmpl) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
rc = -ENOMEM;
goto free_cs;
}
@@ -513,6 +518,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
if (!cs->jobs_in_queue_cnt) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
rc = -ENOMEM;
goto free_fence;
}
@@ -562,7 +569,7 @@ void hl_cs_rollback_all(struct hl_device *hdev)
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
flush_workqueue(hdev->cq_wq[i]);
- /* Make sure we don't have leftovers in the H/W queues mirror list */
+ /* Make sure we don't have leftovers in the CS mirror list */
list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
cs_get(cs);
cs->aborted = true;
@@ -764,11 +771,14 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
static int hl_cs_copy_chunk_array(struct hl_device *hdev,
struct hl_cs_chunk **cs_chunk_array,
- void __user *chunks, u32 num_chunks)
+ void __user *chunks, u32 num_chunks,
+ struct hl_ctx *ctx)
{
u32 size_to_copy;
if (num_chunks > HL_MAX_JOBS_PER_CS) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Number of chunks can NOT be larger than %d\n",
HL_MAX_JOBS_PER_CS);
@@ -777,11 +787,16 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
GFP_ATOMIC);
- if (!*cs_chunk_array)
+ if (!*cs_chunk_array) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
return -ENOMEM;
+ }
size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
kfree(*cs_chunk_array);
return -EFAULT;
@@ -797,6 +812,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_chunk *cs_chunk_array;
struct hl_cs_counters_atomic *cntr;
+ struct hl_ctx *ctx = hpriv->ctx;
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
@@ -805,7 +821,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
cntr = &hdev->aggregated_cs_counters;
*cs_seq = ULLONG_MAX;
- rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+ rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+ hpriv->ctx);
if (rc)
goto out;
@@ -832,8 +849,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = validate_queue_index(hdev, chunk, &queue_type,
&is_kernel_allocated_cb);
if (rc) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
goto free_cs_object;
}
@@ -841,8 +858,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
if (!cb) {
atomic64_inc(
- &hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ &ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
rc = -EINVAL;
goto free_cs_object;
}
@@ -856,8 +873,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
- atomic64_inc(
- &hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
@@ -891,7 +907,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = cs_parser(hpriv, job);
if (rc) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
atomic64_inc(&cntr->parsing_drop_cnt);
dev_err(hdev->dev,
"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
@@ -901,8 +917,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
}
if (int_queues_only) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Reject CS %d.%llu because only internal queues jobs are present\n",
cs->ctx->asid, cs->sequence);
@@ -1042,7 +1058,7 @@ out:
}
static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
- struct hl_cs_chunk *chunk, u64 *signal_seq)
+ struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx)
{
u64 *signal_seq_arr = NULL;
u32 size_to_copy, signal_seq_arr_len;
@@ -1052,6 +1068,8 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
/* currently only one signal seq is supported */
if (signal_seq_arr_len != 1) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Wait for signal CS supports only one signal CS seq\n");
return -EINVAL;
@@ -1060,13 +1078,18 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
signal_seq_arr = kmalloc_array(signal_seq_arr_len,
sizeof(*signal_seq_arr),
GFP_ATOMIC);
- if (!signal_seq_arr)
+ if (!signal_seq_arr) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
return -ENOMEM;
+ }
size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
if (copy_from_user(signal_seq_arr,
u64_to_user_ptr(chunk->signal_seq_arr),
size_to_copy)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Failed to copy signal seq array from user\n");
rc = -EFAULT;
@@ -1153,6 +1176,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_compl *sig_waitcs_cmpl;
u32 q_idx, collective_engine_id = 0;
+ struct hl_cs_counters_atomic *cntr;
struct hl_fence *sig_fence = NULL;
struct hl_ctx *ctx = hpriv->ctx;
enum hl_queue_type q_type;
@@ -1160,9 +1184,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
u64 signal_seq;
int rc;
+ cntr = &hdev->aggregated_cs_counters;
*cs_seq = ULLONG_MAX;
- rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+ rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+ ctx);
if (rc)
goto out;
@@ -1170,6 +1196,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
chunk = &cs_chunk_array[0];
if (chunk->queue_index >= hdev->asic_prop.max_queues) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
rc = -EINVAL;
@@ -1181,6 +1209,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
q_type = hw_queue_prop->type;
if (!hw_queue_prop->supports_sync_stream) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Queue index %d does not support sync stream operations\n",
q_idx);
@@ -1190,6 +1220,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Queue index %d is invalid\n", q_idx);
rc = -EINVAL;
@@ -1200,12 +1232,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
}
if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
- rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq);
+ rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, ctx);
if (rc)
goto free_cs_chunk_array;
sig_fence = hl_ctx_get_fence(ctx, signal_seq);
if (IS_ERR(sig_fence)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Failed to get signal CS with seq 0x%llx\n",
signal_seq);
@@ -1223,6 +1257,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
container_of(sig_fence, struct hl_cs_compl, base_fence);
if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"CS seq 0x%llx is not of a signal CS\n",
signal_seq);
@@ -1270,8 +1306,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
cs, q_idx, collective_engine_id);
- else
+ else {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
rc = -EINVAL;
+ }
if (rc)
goto free_cs_object;
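The hunks above repeat a single bookkeeping pattern: every rejection path bumps both the per-context counter and the device-wide aggregated counter before bailing out, so the two views of the drop statistics never drift apart. A minimal userspace C sketch of that pattern (struct and function names here are illustrative, not the driver's):

	#include <errno.h>
	#include <stdatomic.h>

	struct drop_counters {
		atomic_llong out_of_mem_drop_cnt;
		atomic_llong validation_drop_cnt;
	};

	/* Bump the per-context and the aggregated counter together so both
	 * views stay consistent on every early-exit path. */
	static int reject_out_of_mem(struct drop_counters *ctx_cntr,
				     struct drop_counters *dev_cntr)
	{
		atomic_fetch_add(&ctx_cntr->out_of_mem_drop_cnt, 1);
		atomic_fetch_add(&dev_cntr->out_of_mem_drop_cnt, 1);
		return -ENOMEM;
	}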
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 5871162a8442..69d04eca767f 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -17,12 +17,12 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
- if (hdev->disabled)
- status = HL_DEVICE_STATUS_MALFUNCTION;
- else if (atomic_read(&hdev->in_reset))
+ if (atomic_read(&hdev->in_reset))
status = HL_DEVICE_STATUS_IN_RESET;
else if (hdev->needs_reset)
status = HL_DEVICE_STATUS_NEEDS_RESET;
+ else if (hdev->disabled)
+ status = HL_DEVICE_STATUS_MALFUNCTION;
else
status = HL_DEVICE_STATUS_OPERATIONAL;
@@ -1037,7 +1037,7 @@ kill_processes:
if (hard_reset) {
/* Release kernel context */
- if (hl_ctx_put(hdev->kernel_ctx) == 1)
+ if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
hdev->kernel_ctx = NULL;
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
@@ -1092,6 +1092,7 @@ kill_processes:
GFP_KERNEL);
if (!hdev->kernel_ctx) {
rc = -ENOMEM;
+ hl_mmu_fini(hdev);
goto out_err;
}
@@ -1103,6 +1104,7 @@ kill_processes:
"failed to init kernel ctx in hard reset\n");
kfree(hdev->kernel_ctx);
hdev->kernel_ctx = NULL;
+ hl_mmu_fini(hdev);
goto out_err;
}
}
@@ -1485,6 +1487,15 @@ void hl_device_fini(struct hl_device *hdev)
}
}
+ /* Disable PCI access from device F/W so it won't send us additional
+ * interrupts. We disable MSI/MSI-X at the halt_engines function and we
+ * can't have the F/W sending us interrupts after that. We need to
+ * disable the access here because if the device is marked disabled, the
+ * message won't be sent. Also, in case of heartbeat, the device CPU is
+ * marked as disabled so this message won't be sent
+ */
+ hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
+
/* Mark device as disabled */
hdev->disabled = true;
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 0e1c629e9800..c9a12980218a 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -402,6 +402,10 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
}
counters->rx_throughput = result;
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+
/* Fetch PCI tx counter */
pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -414,6 +418,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
counters->tx_throughput = result;
/* Fetch PCI replay counter */
+ memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
@@ -627,25 +632,38 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
security_status = RREG32(cpu_security_boot_status_reg);
/* We read security status multiple times during boot:
- * 1. preboot - we check if fw security feature is supported
- * 2. boot cpu - we get boot cpu security status
- * 3. FW application - we get FW application security status
+ * 1. preboot - a. Check whether the security status bits are valid
+ * b. Check whether fw security is enabled
+ * c. Check whether hard reset is done by preboot
+ * 2. boot cpu - a. Fetch boot cpu security status
+ * b. Check whether hard reset is done by boot cpu
+ * 3. FW application - a. Fetch fw application security status
+ * b. Check whether hard reset is done by fw app
*
* Preboot:
* Check security status bit (CPU_BOOT_DEV_STS0_ENABLED), if it is set
* check security enabled bit (CPU_BOOT_DEV_STS0_SECURITY_EN)
*/
if (security_status & CPU_BOOT_DEV_STS0_ENABLED) {
- hdev->asic_prop.fw_security_status_valid = 1;
- prop->fw_security_disabled =
- !(security_status & CPU_BOOT_DEV_STS0_SECURITY_EN);
+ prop->fw_security_status_valid = 1;
+
+ if (security_status & CPU_BOOT_DEV_STS0_SECURITY_EN)
+ prop->fw_security_disabled = false;
+ else
+ prop->fw_security_disabled = true;
+
+ if (security_status & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+ prop->hard_reset_done_by_fw = true;
} else {
- hdev->asic_prop.fw_security_status_valid = 0;
+ prop->fw_security_status_valid = 0;
prop->fw_security_disabled = true;
}
+ dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
dev_info(hdev->dev, "firmware-level security is %s\n",
- prop->fw_security_disabled ? "disabled" : "enabled");
+ prop->fw_security_disabled ? "disabled" : "enabled");
return 0;
}
@@ -655,6 +673,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout)
{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 status;
int rc;
@@ -723,11 +742,22 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
/* Read U-Boot version now in case we will later fail */
hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT);
+ /* Clear reset status since we need to read it again from boot CPU */
+ prop->hard_reset_done_by_fw = false;
+
/* Read boot_cpu security bits */
- if (hdev->asic_prop.fw_security_status_valid)
- hdev->asic_prop.fw_boot_cpu_security_map =
+ if (prop->fw_security_status_valid) {
+ prop->fw_boot_cpu_security_map =
RREG32(cpu_security_boot_status_reg);
+ if (prop->fw_boot_cpu_security_map &
+ CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+ prop->hard_reset_done_by_fw = true;
+ }
+
+ dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
if (rc) {
detect_cpu_boot_status(hdev, status);
rc = -EIO;
@@ -796,18 +826,21 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
goto out;
}
+ /* Clear reset status since we need to read again from app */
+ prop->hard_reset_done_by_fw = false;
+
/* Read FW application security bits */
- if (hdev->asic_prop.fw_security_status_valid) {
- hdev->asic_prop.fw_app_security_map =
+ if (prop->fw_security_status_valid) {
+ prop->fw_app_security_map =
RREG32(cpu_security_boot_status_reg);
- if (hdev->asic_prop.fw_app_security_map &
+ if (prop->fw_app_security_map &
CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
- hdev->asic_prop.hard_reset_done_by_fw = true;
+ prop->hard_reset_done_by_fw = true;
}
- dev_dbg(hdev->dev, "Firmware hard-reset is %s\n",
- hdev->asic_prop.hard_reset_done_by_fw ? "enabled" : "disabled");
+ dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
dev_info(hdev->dev, "Successfully loaded firmware to device\n");
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 571eda6ef5ab..60e16dc4bcac 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -944,7 +944,7 @@ struct hl_asic_funcs {
u32 (*get_signal_cb_size)(struct hl_device *hdev);
u32 (*get_wait_cb_size)(struct hl_device *hdev);
u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size);
+ u32 size, bool eb);
u32 (*gen_wait_cb)(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
void (*reset_sob)(struct hl_device *hdev, void *data);
@@ -1000,6 +1000,7 @@ struct hl_va_range {
* @queue_full_drop_cnt: dropped due to queue full
* @device_in_reset_drop_cnt: dropped due to device in reset
* @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
+ * @validation_drop_cnt: dropped due to error in validation
*/
struct hl_cs_counters_atomic {
atomic64_t out_of_mem_drop_cnt;
@@ -1007,6 +1008,7 @@ struct hl_cs_counters_atomic {
atomic64_t queue_full_drop_cnt;
atomic64_t device_in_reset_drop_cnt;
atomic64_t max_cs_in_flight_drop_cnt;
+ atomic64_t validation_drop_cnt;
};
/**
@@ -2180,6 +2182,7 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops);
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst, u32 src_offset, u32 size);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 6bbb6bca6860..032d114f01ea 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -544,6 +544,7 @@ static struct pci_driver hl_pci_driver = {
.id_table = ids,
.probe = hl_pci_probe,
.remove = hl_pci_remove,
+ .shutdown = hl_pci_remove,
.driver.pm = &hl_pm_ops,
.err_handler = &hl_pci_err_handler,
};
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 32e6af1db4e3..d25892d61ec9 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -133,6 +133,8 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
&hw_idle.busy_engines_mask_ext, NULL);
+ hw_idle.busy_engines_mask =
+ lower_32_bits(hw_idle.busy_engines_mask_ext);
return copy_to_user(out, &hw_idle,
min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
@@ -335,6 +337,8 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
atomic64_read(&cntr->device_in_reset_drop_cnt);
cs_counters.total_max_cs_in_flight_drop_cnt =
atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
+ cs_counters.total_validation_drop_cnt =
+ atomic64_read(&cntr->validation_drop_cnt);
if (hpriv->ctx) {
cs_counters.ctx_out_of_mem_drop_cnt =
@@ -352,6 +356,9 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
cs_counters.ctx_max_cs_in_flight_drop_cnt =
atomic64_read(
&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
+ cs_counters.ctx_validation_drop_cnt =
+ atomic64_read(
+ &hpriv->ctx->cs_counters.validation_drop_cnt);
}
return copy_to_user(out, &cs_counters,
@@ -406,7 +413,7 @@ static int total_energy_consumption_info(struct hl_fpriv *hpriv,
static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
- struct hl_pll_frequency_info freq_info = {0};
+ struct hl_pll_frequency_info freq_info = { {0} };
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
int rc;
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 7caf868d1585..76217258780a 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -418,8 +418,11 @@ static void init_signal_cs(struct hl_device *hdev,
"generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
+ /* we set an EB since we must make sure all operations are done
+ * when sending the signal
+ */
hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
- cs_cmpl->hw_sob->sob_id, 0);
+ cs_cmpl->hw_sob->sob_id, 0, true);
kref_get(&hw_sob->kref);
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index cbe9da4e0211..5d4fbdcaefe3 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -886,8 +886,10 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
{
struct hl_device *hdev = ctx->hdev;
u64 next_vaddr, i;
+ bool is_host_addr;
u32 page_size;
+ is_host_addr = !hl_is_dram_va(hdev, vaddr);
page_size = phys_pg_pack->page_size;
next_vaddr = vaddr;
@@ -900,9 +902,13 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
/*
* unmapping on Palladium can be really long, so avoid a CPU
* soft lockup bug by sleeping a little between unmapping pages
+ *
+ * In addition, when unmapping host memory we pass through
+ * the Linux kernel to unpin the pages and that takes a long
+ * time. Therefore, sleep every 32K pages to avoid a soft lockup
*/
- if (hdev->pldm)
- usleep_range(500, 1000);
+ if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
+ usleep_range(50, 200);
}
}
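The new sleep condition above fires once every 32K loop iterations: 0x7FFF masks the low 15 bits of the index, so (i & 0x7FFF) == 0 holds only when i is a multiple of 32768. A minimal sketch of the same throttling idea in plain C, with usleep standing in for the kernel's usleep_range():

	#include <unistd.h>

	/* Yield briefly every 32768 iterations so a very long loop cannot
	 * hog the CPU for the whole duration of the work. */
	static void process_items(unsigned long count)
	{
		for (unsigned long i = 0; i < count; i++) {
			/* ... per-item work ... */
			if ((i & 0x7FFF) == 0)
				usleep(100);
		}
	}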
diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c
index 33ae953d3a36..28a4638741d8 100644
--- a/drivers/misc/habanalabs/common/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu.c
@@ -9,7 +9,7 @@
#include "habanalabs.h"
-static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -156,7 +156,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
if (!hdev->mmu_enable)
return 0;
- is_dram_addr = is_dram_va(hdev, virt_addr);
+ is_dram_addr = hl_is_dram_va(hdev, virt_addr);
if (is_dram_addr)
mmu_prop = &prop->dmmu;
@@ -236,7 +236,7 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
if (!hdev->mmu_enable)
return 0;
- is_dram_addr = is_dram_va(hdev, virt_addr);
+ is_dram_addr = hl_is_dram_va(hdev, virt_addr);
if (is_dram_addr)
mmu_prop = &prop->dmmu;
diff --git a/drivers/misc/habanalabs/common/mmu_v1.c b/drivers/misc/habanalabs/common/mmu_v1.c
index 2ce6ea89d4fa..06d8a44dd5d4 100644
--- a/drivers/misc/habanalabs/common/mmu_v1.c
+++ b/drivers/misc/habanalabs/common/mmu_v1.c
@@ -467,8 +467,16 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
{
/* MMU H/W fini was already done in device hw_fini() */
- kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
- gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+ if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
+ kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
+ gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+ }
+
+ /* Make sure that if we arrive here again without init having been called
+ * we won't cause a kernel panic. This can happen, for example, if we fail
+ * during hard reset code at certain points
+ */
+ hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
}
/**
diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci.c
index 923b2606e29f..b4725e6101f6 100644
--- a/drivers/misc/habanalabs/common/pci.c
+++ b/drivers/misc/habanalabs/common/pci.c
@@ -130,10 +130,8 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
return 0;
- if (val & PCI_CONFIG_ELBI_STS_ERR) {
- dev_err(hdev->dev, "Error writing to ELBI\n");
+ if (val & PCI_CONFIG_ELBI_STS_ERR)
return -EIO;
- }
if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
dev_err(hdev->dev, "ELBI write didn't finish in time\n");
@@ -160,8 +158,12 @@ int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
dbi_offset = addr & 0xFFF;
- rc = hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
+ /* Ignore the result of writing to pcie_aux_dbi_reg_addr as it could
+ * fail when firmware security is enabled
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
+
+ rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
data);
if (rc)
@@ -244,9 +246,11 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
- /* Return the DBI window to the default location */
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+ /* Return the DBI window to the default location.
+ * Ignore the result of writing to pcie_aux_dbi_reg_addr as it could
+ * fail when firmware security is enabled
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
if (rc)
dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
@@ -294,9 +298,11 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
/* Enable */
rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);
- /* Return the DBI window to the default location */
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+ /* Return the DBI window to the default location.
+ * Ignore the result of writing to pcie_aux_dbi_reg_addr as it could
+ * fail when firmware security is enabled
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
return rc;
}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 1f1926607c5e..b328ddaa64ee 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -151,19 +151,6 @@ static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
[PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe)
};
-static const u32 gaudi_pll_base_addresses[GAUDI_PLL_MAX] = {
- [CPU_PLL] = mmPSOC_CPU_PLL_NR,
- [PCI_PLL] = mmPSOC_PCI_PLL_NR,
- [SRAM_PLL] = mmSRAM_W_PLL_NR,
- [HBM_PLL] = mmPSOC_HBM_PLL_NR,
- [NIC_PLL] = mmNIC0_PLL_NR,
- [DMA_PLL] = mmDMA_W_PLL_NR,
- [MESH_PLL] = mmMESH_W_PLL_NR,
- [MME_PLL] = mmPSOC_MME_PLL_NR,
- [TPC_PLL] = mmPSOC_TPC_PLL_NR,
- [IF_PLL] = mmIF_W_PLL_NR
-};
-
static inline bool validate_packet_id(enum packet_id id)
{
switch (id) {
@@ -374,7 +361,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev);
static void gaudi_disable_clock_gating(struct hl_device *hdev);
static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size);
+ u32 size, bool eb);
static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
@@ -667,12 +654,6 @@ static int gaudi_early_init(struct hl_device *hdev)
if (rc)
goto free_queue_props;
- if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
- dev_info(hdev->dev,
- "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
- }
-
/* Before continuing in the initialization, we need to read the preboot
* version to determine whether we run with a security-enabled firmware
*/
@@ -685,6 +666,12 @@ static int gaudi_early_init(struct hl_device *hdev)
goto pci_fini;
}
+ if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_info(hdev->dev,
+ "H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->hw_fini(hdev, true);
+ }
+
return 0;
pci_fini:
@@ -703,93 +690,60 @@ static int gaudi_early_fini(struct hl_device *hdev)
}
/**
- * gaudi_fetch_pll_frequency - Fetch PLL frequency values
+ * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
*
* @hdev: pointer to hl_device structure
- * @pll_index: index of the pll to fetch frequency from
- * @pll_freq: pointer to store the pll frequency in MHz in each of the available
- * outputs. if a certain output is not available a 0 will be set
*
*/
-static int gaudi_fetch_pll_frequency(struct hl_device *hdev,
- enum gaudi_pll_index pll_index,
- u16 *pll_freq_arr)
+static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
{
- u32 nr = 0, nf = 0, od = 0, pll_clk = 0, div_fctr, div_sel,
- pll_base_addr = gaudi_pll_base_addresses[pll_index];
- u16 freq = 0;
- int i, rc;
-
- if (hdev->asic_prop.fw_security_status_valid &&
- (hdev->asic_prop.fw_app_security_map &
- CPU_BOOT_DEV_STS0_PLL_INFO_EN)) {
- rc = hl_fw_cpucp_pll_info_get(hdev, pll_index, pll_freq_arr);
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+ u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+ int rc;
- if (rc)
- return rc;
- } else if (hdev->asic_prop.fw_security_disabled) {
+ if (hdev->asic_prop.fw_security_disabled) {
/* Backward compatibility */
- nr = RREG32(pll_base_addr + PLL_NR_OFFSET);
- nf = RREG32(pll_base_addr + PLL_NF_OFFSET);
- od = RREG32(pll_base_addr + PLL_OD_OFFSET);
-
- for (i = 0; i < HL_PLL_NUM_OUTPUTS; i++) {
- div_fctr = RREG32(pll_base_addr +
- PLL_DIV_FACTOR_0_OFFSET + i * 4);
- div_sel = RREG32(pll_base_addr +
- PLL_DIV_SEL_0_OFFSET + i * 4);
+ div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
+ div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
+ nr = RREG32(mmPSOC_CPU_PLL_NR);
+ nf = RREG32(mmPSOC_CPU_PLL_NF);
+ od = RREG32(mmPSOC_CPU_PLL_OD);
- if (div_sel == DIV_SEL_REF_CLK ||
+ if (div_sel == DIV_SEL_REF_CLK ||
div_sel == DIV_SEL_DIVIDED_REF) {
- if (div_sel == DIV_SEL_REF_CLK)
- freq = PLL_REF_CLK;
- else
- freq = PLL_REF_CLK / (div_fctr + 1);
- } else if (div_sel == DIV_SEL_PLL_CLK ||
- div_sel == DIV_SEL_DIVIDED_PLL) {
- pll_clk = PLL_REF_CLK * (nf + 1) /
- ((nr + 1) * (od + 1));
- if (div_sel == DIV_SEL_PLL_CLK)
- freq = pll_clk;
- else
- freq = pll_clk / (div_fctr + 1);
- } else {
- dev_warn(hdev->dev,
- "Received invalid div select value: %d",
- div_sel);
- }
-
- pll_freq_arr[i] = freq;
+ if (div_sel == DIV_SEL_REF_CLK)
+ freq = PLL_REF_CLK;
+ else
+ freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) /
+ ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ freq = pll_clk;
+ else
+ freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d",
+ div_sel);
+ freq = 0;
}
} else {
- dev_err(hdev->dev, "Failed to fetch PLL frequency values\n");
- return -EIO;
- }
+ rc = hl_fw_cpucp_pll_info_get(hdev, CPU_PLL, pll_freq_arr);
- return 0;
-}
-
-/**
- * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
- *
- * @hdev: pointer to hl_device structure
- *
- */
-static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u16 pll_freq[HL_PLL_NUM_OUTPUTS];
- int rc;
+ if (rc)
+ return rc;
- rc = gaudi_fetch_pll_frequency(hdev, CPU_PLL, pll_freq);
- if (rc)
- return rc;
+ freq = pll_freq_arr[2];
+ }
- prop->psoc_timestamp_frequency = pll_freq[2];
- prop->psoc_pci_pll_nr = 0;
- prop->psoc_pci_pll_nf = 0;
- prop->psoc_pci_pll_od = 0;
- prop->psoc_pci_pll_div_factor = 0;
+ prop->psoc_timestamp_frequency = freq;
+ prop->psoc_pci_pll_nr = nr;
+ prop->psoc_pci_pll_nf = nf;
+ prop->psoc_pci_pll_od = od;
+ prop->psoc_pci_pll_div_factor = div_fctr;
return 0;
}
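For reference, the frequency computed above follows the usual PLL relation pll_clk = PLL_REF_CLK * (NF + 1) / ((NR + 1) * (OD + 1)), optionally divided by (div_fctr + 1) when a divided output is selected. As a purely illustrative example (register values assumed, not taken from real hardware), with a 50 MHz reference, nf = 31, nr = 0, od = 1 and div_fctr = 3 in DIV_SEL_DIVIDED_PLL mode: pll_clk = 50 * 32 / (1 * 2) = 800 MHz and freq = 800 / 4 = 200 MHz.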
@@ -884,11 +838,17 @@ static int gaudi_init_tpc_mem(struct hl_device *hdev)
size_t fw_size;
void *cpu_addr;
dma_addr_t dma_handle;
- int rc;
+ int rc, count = 5;
+again:
rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev);
+ if (rc == -EINTR && count-- > 0) {
+ msleep(50);
+ goto again;
+ }
+
if (rc) {
- dev_err(hdev->dev, "Firmware file %s is not found!\n",
+ dev_err(hdev->dev, "Failed to load firmware file %s\n",
GAUDI_TPC_FW_FILE);
goto out;
}
@@ -1110,7 +1070,7 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
prop->collective_sob_id, queue_id);
cb_size += gaudi_gen_signal_cb(hdev, job->user_cb,
- prop->collective_sob_id, cb_size);
+ prop->collective_sob_id, cb_size, false);
}
static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
@@ -2449,8 +2409,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
gaudi_init_e2e(hdev);
gaudi_init_hbm_cred(hdev);
- hdev->asic_funcs->disable_clock_gating(hdev);
-
for (tpc_id = 0, tpc_offset = 0;
tpc_id < TPC_NUMBER_OF_ENGINES;
tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
@@ -3462,6 +3420,9 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
if (hdev->in_debug)
return;
+ if (!hdev->asic_prop.fw_security_disabled)
+ return;
+
for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
enable = !!(hdev->clock_gating_mask &
(BIT_ULL(gaudi_dma_assignment[i])));
@@ -3513,7 +3474,7 @@ static void gaudi_disable_clock_gating(struct hl_device *hdev)
u32 qman_offset;
int i;
- if (!(gaudi->hw_cap_initialized & HW_CAP_CLK_GATE))
+ if (!hdev->asic_prop.fw_security_disabled)
return;
for (i = 0, qman_offset = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
@@ -3806,7 +3767,7 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
static void gaudi_pre_hw_init(struct hl_device *hdev)
{
/* Perform read from the device to make sure device is up */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
if (hdev->asic_prop.fw_security_disabled) {
/* Set the access through PCI bars (Linux driver only) as
@@ -3847,6 +3808,13 @@ static int gaudi_hw_init(struct hl_device *hdev)
return rc;
}
+ /* In case the clock gating was enabled in preboot we need to disable
+ * it here before touching the MME/TPC registers.
+ * There is no need to take the clock gating mutex because when this
+ * function runs, no other relevant code can run
+ * runs, no other relevant code can run
+ */
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
/* SRAM scrambler must be initialized after CPU is running from HBM */
gaudi_init_scrambler_sram(hdev);
@@ -3885,7 +3853,7 @@ static int gaudi_hw_init(struct hl_device *hdev)
}
/* Perform read from the device to flush all configuration */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
return 0;
@@ -3927,7 +3895,10 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
/* I don't know what is the state of the CPU so make sure it is
* stopped in any means necessary
*/
- WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
+ if (hdev->asic_prop.hard_reset_done_by_fw)
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_RST_DEV);
+ else
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE);
@@ -3971,11 +3942,15 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
- }
- dev_info(hdev->dev,
- "Issued HARD reset command, going to wait %dms\n",
- reset_timeout_ms);
+ dev_info(hdev->dev,
+ "Issued HARD reset command, going to wait %dms\n",
+ reset_timeout_ms);
+ } else {
+ dev_info(hdev->dev,
+ "Firmware performs HARD reset, going to wait %dms\n",
+ reset_timeout_ms);
+ }
/*
* After hard reset, we can't poll the BTM_FSM register because the PSOC
@@ -4027,7 +4002,8 @@ static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE;
- rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+ (dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
@@ -7936,7 +7912,7 @@ static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
}
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size)
+ u32 size, bool eb)
{
struct hl_cb *cb = (struct hl_cb *) data;
struct packet_msg_short *pkt;
@@ -7953,7 +7929,7 @@ static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, eb);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index f2d91f4fcffe..a7ab2d7e57d4 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -105,13 +105,6 @@
#define MME_ACC_OFFSET (mmMME1_ACC_BASE - mmMME0_ACC_BASE)
#define SRAM_BANK_OFFSET (mmSRAM_Y0_X1_RTR_BASE - mmSRAM_Y0_X0_RTR_BASE)
-#define PLL_NR_OFFSET 0
-#define PLL_NF_OFFSET (mmPSOC_CPU_PLL_NF - mmPSOC_CPU_PLL_NR)
-#define PLL_OD_OFFSET (mmPSOC_CPU_PLL_OD - mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_FACTOR_0_OFFSET (mmPSOC_CPU_PLL_DIV_FACTOR_0 - \
- mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_SEL_0_OFFSET (mmPSOC_CPU_PLL_DIV_SEL_0 - mmPSOC_CPU_PLL_NR)
-
#define NUM_OF_SOB_IN_BLOCK \
(((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 - \
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2)
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index 2e3612e1ee28..88a09d42e111 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -9,6 +9,7 @@
#include "../include/gaudi/gaudi_coresight.h"
#include "../include/gaudi/asic_reg/gaudi_regs.h"
#include "../include/gaudi/gaudi_masks.h"
+#include "../include/gaudi/gaudi_reg_map.h"
#include <uapi/misc/habanalabs.h>
#define SPMU_SECTION_SIZE MME0_ACC_SPMU_MAX_OFFSET
@@ -874,7 +875,7 @@ int gaudi_debug_coresight(struct hl_device *hdev, void *data)
}
/* Perform read from the device to flush all configuration */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
return rc;
}
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 3e5eb9e3d7bd..63679a747d2c 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -613,12 +613,6 @@ static int goya_early_init(struct hl_device *hdev)
if (rc)
goto free_queue_props;
- if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
- dev_info(hdev->dev,
- "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
- }
-
/* Before continuing in the initialization, we need to read the preboot
* version to determine whether we run with a security-enabled firmware
*/
@@ -631,6 +625,12 @@ static int goya_early_init(struct hl_device *hdev)
goto pci_fini;
}
+ if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_info(hdev->dev,
+ "H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->hw_fini(hdev, true);
+ }
+
if (!hdev->pldm) {
val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
@@ -694,32 +694,47 @@ static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 trace_freq = 0;
- u32 pll_clk = 0;
- u32 div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
- u32 div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
- u32 nr = RREG32(mmPSOC_PCI_PLL_NR);
- u32 nf = RREG32(mmPSOC_PCI_PLL_NF);
- u32 od = RREG32(mmPSOC_PCI_PLL_OD);
-
- if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
- if (div_sel == DIV_SEL_REF_CLK)
- trace_freq = PLL_REF_CLK;
- else
- trace_freq = PLL_REF_CLK / (div_fctr + 1);
- } else if (div_sel == DIV_SEL_PLL_CLK ||
- div_sel == DIV_SEL_DIVIDED_PLL) {
- pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
- if (div_sel == DIV_SEL_PLL_CLK)
- trace_freq = pll_clk;
- else
- trace_freq = pll_clk / (div_fctr + 1);
+ u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+ u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+ int rc;
+
+ if (hdev->asic_prop.fw_security_disabled) {
+ div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+ div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
+ nr = RREG32(mmPSOC_PCI_PLL_NR);
+ nf = RREG32(mmPSOC_PCI_PLL_NF);
+ od = RREG32(mmPSOC_PCI_PLL_OD);
+
+ if (div_sel == DIV_SEL_REF_CLK ||
+ div_sel == DIV_SEL_DIVIDED_REF) {
+ if (div_sel == DIV_SEL_REF_CLK)
+ freq = PLL_REF_CLK;
+ else
+ freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) /
+ ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ freq = pll_clk;
+ else
+ freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d",
+ div_sel);
+ freq = 0;
+ }
} else {
- dev_warn(hdev->dev,
- "Received invalid div select value: %d", div_sel);
+ rc = hl_fw_cpucp_pll_info_get(hdev, PCI_PLL, pll_freq_arr);
+
+ if (rc)
+ return;
+
+ freq = pll_freq_arr[1];
}
- prop->psoc_timestamp_frequency = trace_freq;
+ prop->psoc_timestamp_frequency = freq;
prop->psoc_pci_pll_nr = nr;
prop->psoc_pci_pll_nf = nf;
prop->psoc_pci_pll_od = od;
@@ -2704,7 +2719,8 @@ static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE;
- rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+ (dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
@@ -5324,7 +5340,7 @@ static u32 goya_get_wait_cb_size(struct hl_device *hdev)
}
static u32 goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size)
+ u32 size, bool eb)
{
return 0;
}
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index e5801ecf0cb2..b637dfd69f6e 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -145,11 +145,15 @@
* implemented. This means that FW will
* perform hard reset procedure on
* receiving the halt-machine event.
- * Initialized in: linux
+ * Initialized in: preboot, u-boot, linux
*
* CPU_BOOT_DEV_STS0_PLL_INFO_EN FW retrieval of PLL info is enabled.
* Initialized in: linux
*
+ * CPU_BOOT_DEV_STS0_CLK_GATE_EN Clock Gating enabled.
+ * FW initialized Clock Gating.
+ * Initialized in: preboot
+ *
* CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
* This is a main indication that the
* running FW populates the device status
@@ -171,6 +175,7 @@
#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << 9)
#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << 10)
#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << 11)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << 13)
#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31)
enum cpu_boot_status {
@@ -204,6 +209,8 @@ enum kmd_msg {
KMD_MSG_GOTO_WFE,
KMD_MSG_FIT_RDY,
KMD_MSG_SKIP_BMC,
+ RESERVED,
+ KMD_MSG_RST_DEV,
};
enum cpu_msg_status {
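The RESERVED entry added to enum kmd_msg above presumably keeps a firmware-defined slot free so that KMD_MSG_RST_DEV lands on the intended numeric value. A small illustrative C sketch (names invented) of how an explicit placeholder preserves later enumerator values in a wire/ABI enum:

	/* Skipping a slot with an explicit placeholder keeps the values of
	 * the following enumerators unchanged. */
	enum example_msg {
		EX_MSG_NA,		/* 0 */
		EX_MSG_GOTO_WFE,	/* 1 */
		EX_MSG_FIT_RDY,		/* 2 */
		EX_MSG_SKIP_BMC,	/* 3 */
		EX_MSG_RESERVED,	/* 4 - hole kept on purpose */
		EX_MSG_RST_DEV,		/* 5 */
	};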
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index 9ae9669e46ea..3506a3534294 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -569,8 +569,7 @@ static int mei_hdcp_verify_mprime(struct device *dev,
verify_mprime_in->header.api_version = HDCP_API_VERSION;
verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
verify_mprime_in->header.status = ME_HDCP_STATUS_SUCCESS;
- verify_mprime_in->header.buffer_len =
- WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN;
+ verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header);
verify_mprime_in->port.integrated_port_type = data->port_type;
verify_mprime_in->port.physical_port = (u8)data->fw_ddi;
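The mei_hdcp change above derives the header's buffer_len from the actual command size instead of a fixed minimum, which matters once the stream-request payload can be larger than that minimum. A small self-contained C sketch of the same "total minus header" computation (types and sizes here are invented for illustration):

	#include <stddef.h>
	#include <stdint.h>

	struct hdr { uint32_t api_version, command_id, status, buffer_len; };
	struct cmd { struct hdr header; uint8_t payload[256]; };

	/* buffer_len describes only the bytes that follow the header, so it
	 * is computed from the full command size rather than hard-coded. */
	static void fill_buffer_len(struct cmd *c, size_t cmd_size)
	{
		c->header.buffer_len = (uint32_t)(cmd_size - sizeof(c->header));
	}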
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 951b37da5e3c..41cab297d66e 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -55,12 +55,23 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
struct resource *res;
res = platform_get_mem_or_io(pdev, 0);
- if (res && resource_type(res) == IORESOURCE_IO)
+ if (!res)
+ return -EINVAL;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
base = devm_ioport_map(dev, res->start, resource_size(res));
- else
+ if (!base)
+ return -ENOMEM;
+ break;
+ case IORESOURCE_MEM:
base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+ break;
+ default:
+ return -EINVAL;
+ }
atomic_notifier_chain_register(&panic_notifier_list,
&pvpanic_panic_nb);
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index de7cb0369c30..002426e3cf76 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -384,8 +384,10 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
"merging was advertised but not possible");
blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
- if (mmc_card_mmc(card))
+ if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
block_size = card->ext_csd.data_sector_size;
+ WARN_ON(block_size != 512 && block_size != 4096);
+ }
blk_queue_logical_block_size(mq->queue, block_size);
/*
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index bbf3496f4495..f9780c65ebe9 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -314,11 +314,7 @@ err_clk:
static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
{
- int ret;
-
- ret = sdhci_pltfm_unregister(pdev);
- if (ret)
- dev_err(&pdev->dev, "failed to shutdown\n");
+ sdhci_pltfm_suspend(&pdev->dev);
}
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 4b673792b5a4..d90020ed3622 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -16,6 +16,8 @@
#include "sdhci-pltfm.h"
+#define SDHCI_DWCMSHC_ARG2_STUFF GENMASK(31, 16)
+
/* DWCMSHC specific Mode Select value */
#define DWCMSHC_CTRL_HS400 0x7
@@ -49,6 +51,29 @@ static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc,
sdhci_adma_write_desc(host, desc, addr, len, cmd);
}
+static void dwcmshc_check_auto_cmd23(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ /*
+ * Whether or not V4 mode is enabled, the ARGUMENT2 register is a
+ * 32-bit block count register which doesn't support the stuff bits
+ * of the CMD23 argument on the dwcmshc host controller.
+ */
+ if (mrq->sbc && (mrq->sbc->arg & SDHCI_DWCMSHC_ARG2_STUFF))
+ host->flags &= ~SDHCI_AUTO_CMD23;
+ else
+ host->flags |= SDHCI_AUTO_CMD23;
+}
+
+static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ dwcmshc_check_auto_cmd23(mmc, mrq);
+
+ sdhci_request(mmc, mrq);
+}
+
static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
@@ -133,6 +158,8 @@ static int dwcmshc_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
+ host->mmc_host_ops.request = dwcmshc_request;
+
err = sdhci_add_host(host);
if (err)
goto err_clk;
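The per-request hook added above decides whether auto CMD23 can stay enabled by testing the upper 16 bits of the CMD23 argument, which the controller's ARGUMENT2 register cannot carry. A minimal sketch of that check (mask written out explicitly; the helper name is illustrative):

	#include <stdbool.h>
	#include <stdint.h>

	#define ARG2_STUFF_MASK 0xffff0000u	/* equivalent of GENMASK(31, 16) */

	/* Auto CMD23 is only safe when the argument fits in the low 16 bits. */
	static bool auto_cmd23_usable(uint32_t sbc_arg)
	{
		return (sbc_arg & ARG2_STUFF_MASK) == 0;
	}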
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index c67611fdaa8a..d19eef5f725f 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -168,7 +168,12 @@ static void xenon_reset_exit(struct sdhci_host *host,
/* Disable tuning request and auto-retuning again */
xenon_retune_setup(host);
- xenon_set_acg(host, true);
+ /*
+ * The ACG should be turned off during early init in order to avoid
+ * possible issues with 1.8V regulator stabilization.
+ * The feature is enabled again at a later stage.
+ */
+ xenon_set_acg(host, false);
xenon_set_sdclk_off_idle(host, sdhc_id, false);
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 5cdf05bcbf8f..3fa8c22d3f36 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -1615,7 +1615,7 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
/* Extract interleaved payload data and ECC bits */
for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
if (buf)
- nand_extract_bits(buf, step * eccsize, tmp_buf,
+ nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
src_bit_off, eccsize * 8);
src_bit_off += eccsize * 8;
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
index fdb112e8a90d..a304fda5d1fa 100644
--- a/drivers/mtd/nand/raw/intel-nand-controller.c
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -579,7 +579,7 @@ static int ebu_nand_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ebu_nand_controller *ebu_host;
struct nand_chip *nand;
- struct mtd_info *mtd = NULL;
+ struct mtd_info *mtd;
struct resource *res;
char *resname;
int ret;
@@ -647,12 +647,13 @@ static int ebu_nand_probe(struct platform_device *pdev)
ebu_host->ebu + EBU_ADDR_SEL(cs));
nand_set_flash_node(&ebu_host->chip, dev->of_node);
+
+ mtd = nand_to_mtd(&ebu_host->chip);
if (!mtd->name) {
dev_err(ebu_host->dev, "NAND label property is mandatory\n");
return -EINVAL;
}
- mtd = nand_to_mtd(&ebu_host->chip);
mtd->dev.parent = dev;
ebu_host->dev = dev;
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index f2b9250c0ea8..0750121ac371 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -2210,6 +2210,9 @@ static int ns_attach_chip(struct nand_chip *chip)
{
unsigned int eccsteps, eccbytes;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = bch ? NAND_ECC_ALGO_BCH : NAND_ECC_ALGO_HAMMING;
+
if (!bch)
return 0;
@@ -2233,8 +2236,6 @@ static int ns_attach_chip(struct nand_chip *chip)
return -EINVAL;
}
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_BCH;
chip->ecc.size = 512;
chip->ecc.strength = bch;
chip->ecc.bytes = eccbytes;
@@ -2273,8 +2274,6 @@ static int __init ns_init_module(void)
nsmtd = nand_to_mtd(chip);
nand_set_controller_data(chip, (void *)ns);
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
/* and 'badblocks' parameters to work */
chip->options |= NAND_SKIP_BBTSCAN;
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index fbb9955f2467..2c3e65cb68f3 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -15,6 +15,7 @@
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-bch.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/omap-dma.h>
@@ -1866,18 +1867,19 @@ static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
int off = BADBLOCK_MARKER_LENGTH;
- if (section >= chip->ecc.steps)
+ if (section >= engine_conf->nsteps)
return -ERANGE;
/*
* When SW correction is employed, one OMAP specific marker byte is
* reserved after each ECC step.
*/
- oobregion->offset = off + (section * (chip->ecc.bytes + 1));
- oobregion->length = chip->ecc.bytes;
+ oobregion->offset = off + (section * (engine_conf->code_size + 1));
+ oobregion->length = engine_conf->code_size;
return 0;
}
@@ -1885,7 +1887,8 @@ static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
int off = BADBLOCK_MARKER_LENGTH;
if (section)
@@ -1895,7 +1898,7 @@ static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
* When SW correction is employed, one OMAP specific marker byte is
* reserved after each ECC step.
*/
- off += ((chip->ecc.bytes + 1) * chip->ecc.steps);
+ off += ((engine_conf->code_size + 1) * engine_conf->nsteps);
if (off >= mtd->oobsize)
return -ERANGE;
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 8ea545bb924d..61d932c1b718 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -343,6 +343,7 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
struct spi_mem_dirmap_desc *rdesc;
unsigned int nbytes = 0;
void *buf = NULL;
@@ -382,9 +383,16 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
req->datalen);
- if (req->ooblen)
- memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
- req->ooblen);
+ if (req->ooblen) {
+ if (req->mode == MTD_OPS_AUTO_OOB)
+ mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
+ spinand->oobbuf,
+ req->ooboffs,
+ req->ooblen);
+ else
+ memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
+ req->ooblen);
+ }
return 0;
}
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 85ebd2b7e446..85de5f96c02b 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -380,7 +380,7 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
goto free_dst;
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
- BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
+ BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);
err = skb_cow_head(skb, min_headroom);
if (unlikely(err))
@@ -534,6 +534,7 @@ static void bareudp_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &bareudp_type);
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
@@ -644,11 +645,20 @@ static int bareudp_link_config(struct net_device *dev,
return 0;
}
+static void bareudp_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+
+ list_del(&bareudp->next);
+ unregister_netdevice_queue(dev, head);
+}
+
static int bareudp_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct bareudp_conf conf;
+ LIST_HEAD(list_kill);
int err;
err = bareudp2info(data, &conf, extack);
@@ -661,17 +671,14 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
err = bareudp_link_config(dev, tb);
if (err)
- return err;
+ goto err_unconfig;
return 0;
-}
-
-static void bareudp_dellink(struct net_device *dev, struct list_head *head)
-{
- struct bareudp_dev *bareudp = netdev_priv(dev);
- list_del(&bareudp->next);
- unregister_netdevice_queue(dev, head);
+err_unconfig:
+ bareudp_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ return err;
}
static size_t bareudp_get_size(const struct net_device *dev)
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 424970939fd4..1c28eade6bec 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -123,6 +123,7 @@ config CAN_JANZ_ICAN3
config CAN_KVASER_PCIEFD
depends on PCI
tristate "Kvaser PCIe FD cards"
+ select CRC32
help
This is a driver for the Kvaser PCI Express CAN FD family.
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3486704c8a95..8b1ae023cb21 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -592,11 +592,11 @@ static void can_restart(struct net_device *dev)
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx_ni(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->len;
+ netif_rx_ni(skb);
+
restart:
netdev_dbg(dev, "restarted\n");
priv->can_stats.restarts++;
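This reordering, like the peak_usb and vxcan changes further below, addresses the same hazard: once the skb has been handed to the stack via netif_rx_ni(), the driver must not touch it (or data pointing into it) again, so statistics are updated, or the needed length snapshotted, beforehand. A small stand-alone C sketch of the idea, with free() standing in for the ownership transfer:

	#include <stdlib.h>

	struct frame { unsigned int len; unsigned char data[64]; };
	struct stats { unsigned long packets, bytes; };

	/* Copy out whatever is still needed from the buffer before handing
	 * it off; after the hand-off the buffer may already be freed. */
	static void deliver(struct frame *f, struct stats *st)
	{
		unsigned int len = f->len;	/* snapshot before ownership transfer */

		st->packets++;
		st->bytes += len;

		free(f);			/* stand-in for netif_rx_ni(skb) */
	}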
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2c9f12401276..da551fd0f502 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1852,8 +1852,6 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
unregister_candev(cdev->net);
-
- m_can_clk_stop(cdev);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
index 24c737c4fc44..970f0e9d19bf 100644
--- a/drivers/net/can/m_can/tcan4x5x.c
+++ b/drivers/net/can/m_can/tcan4x5x.c
@@ -131,30 +131,6 @@ static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev)
}
-static struct can_bittiming_const tcan4x5x_bittiming_const = {
- .name = DEVICE_NAME,
- .tseg1_min = 2,
- .tseg1_max = 31,
- .tseg2_min = 2,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
-static struct can_bittiming_const tcan4x5x_data_bittiming_const = {
- .name = DEVICE_NAME,
- .tseg1_min = 1,
- .tseg1_max = 32,
- .tseg2_min = 1,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
{
int wake_state = 0;
@@ -469,8 +445,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
mcan_class->dev = &spi->dev;
mcan_class->ops = &tcan4x5x_ops;
mcan_class->is_peripheral = true;
- mcan_class->bit_timing = &tcan4x5x_bittiming_const;
- mcan_class->data_timing = &tcan4x5x_data_bittiming_const;
mcan_class->net->irq = spi->irq;
spi_set_drvdata(spi, priv);
diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig
index 8d36101b78e3..29cabc20109e 100644
--- a/drivers/net/can/rcar/Kconfig
+++ b/drivers/net/can/rcar/Kconfig
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
config CAN_RCAR
- tristate "Renesas R-Car CAN controller"
+ tristate "Renesas R-Car and RZ/G CAN controller"
depends on ARCH_RENESAS || ARM
help
Say Y here if you want to use CAN controller found on Renesas R-Car
- SoCs.
+ or RZ/G SoCs.
To compile this driver as a module, choose M here: the module will
be called rcar_can.
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 77129d5f410b..f07e8b737d31 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -1368,13 +1368,10 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
struct spi_transfer *last_xfer;
- tx_ring->tail += len;
-
/* Increment the TEF FIFO tail pointer 'len' times in
* a single SPI message.
- */
-
- /* Note:
+ *
+ * Note:
*
* "cs_change == 1" on the last transfer results in an
* active chip select after the complete SPI
@@ -1391,6 +1388,8 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
if (err)
return err;
+ tx_ring->tail += len;
+
err = mcp251xfd_check_tef_tail(priv);
if (err)
return err;
@@ -1492,7 +1491,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
else
skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
- if (!cfd) {
+ if (!skb) {
stats->rx_dropped++;
return 0;
}
@@ -1553,10 +1552,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
/* Increment the RX FIFO tail pointer 'len' times in a
* single SPI message.
- */
- ring->tail += len;
-
- /* Note:
+ *
+ * Note:
*
* "cs_change == 1" on the last transfer results in an
* active chip select after the complete SPI
@@ -1572,6 +1569,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
last_xfer->cs_change = 1;
if (err)
return err;
+
+ ring->tail += len;
}
return 0;
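
The mcp251xfd hunks above defer the FIFO tail-pointer increment until after the SPI transfer that processes those slots has returned successfully, so a failed transfer no longer leaves the tail ahead of the hardware. A minimal standalone C sketch of that ordering rule follows; the ring type and do_transfer() helper are hypothetical stand-ins, not the driver's API.

struct ring {
        unsigned int tail;
};

/* Hypothetical transfer step that may fail. */
static int do_transfer(unsigned int len)
{
        return len ? 0 : -1;
}

/* Commit the tail pointer only after the operation that consumes the
 * slots has succeeded, so a failed transfer never advertises slots the
 * hardware still owns.
 */
static int consume(struct ring *r, unsigned int len)
{
        int err;

        err = do_transfer(len);
        if (err)
                return err;     /* tail untouched on failure */

        r->tail += len;         /* commit only on success */
        return 0;
}
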
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 61631f4fd92a..f347ecc79aef 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -514,11 +514,11 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
else
memcpy(cfd->data, rm->d, cfd->len);
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
-
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cfd->len;
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
+
return 0;
}
@@ -580,11 +580,11 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
if (!skb)
return -ENOMEM;
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
-
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cf->len;
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
+
return 0;
}
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index fa47bab510bb..f9a524c5f6d6 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -39,6 +39,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device *peer;
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *peerstats, *srcstats = &dev->stats;
+ u8 len;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
@@ -61,12 +62,13 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
skb->dev = peer;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ len = cfd->len;
if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
- srcstats->tx_bytes += cfd->len;
+ srcstats->tx_bytes += len;
peerstats = &peer->stats;
peerstats->rx_packets++;
- peerstats->rx_bytes += cfd->len;
+ peerstats->rx_bytes += len;
}
out_unlock:
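
The vxcan hunk caches cfd->len in a local before netif_rx_ni() because the receive path may free the skb, after which reading cfd->len for the byte counters would be a use-after-free. Below is a hedged userspace sketch of the same snapshot-before-handoff pattern; the frame/stats types and deliver() helper are illustrative only.

#include <stdlib.h>

struct frame {
        unsigned char len;
        unsigned char data[64];
};

struct stats {
        unsigned long packets;
        unsigned long bytes;
};

/* Hypothetical receive hook that takes ownership of (and may free) buf. */
static int deliver(struct frame *buf)
{
        free(buf);
        return 0;
}

static void xmit(struct frame *buf, struct stats *st)
{
        unsigned char len = buf->len;   /* snapshot before ownership passes */

        if (deliver(buf) == 0) {
                st->packets++;
                st->bytes += len;       /* safe: never touches the freed buf */
        }
}
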
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 288b5a5c3e0d..95c7fa171e35 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1404,7 +1404,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
return -EINVAL;
- if (vlan->vid_end > dev->num_vlans)
+ if (vlan->vid_end >= dev->num_vlans)
return -ERANGE;
b53_enable_vlan(dev, true, ds->vlan_filtering);
diff --git a/drivers/net/dsa/hirschmann/Kconfig b/drivers/net/dsa/hirschmann/Kconfig
index 222dd35e2c9d..e01191107a4b 100644
--- a/drivers/net/dsa/hirschmann/Kconfig
+++ b/drivers/net/dsa/hirschmann/Kconfig
@@ -4,6 +4,7 @@ config NET_DSA_HIRSCHMANN_HELLCREEK
depends on HAS_IOMEM
depends on NET_DSA
depends on PTP_1588_CLOCK
+ depends on LEDS_CLASS
select NET_DSA_TAG_HELLCREEK
help
This driver adds support for Hirschmann Hellcreek TSN switches.
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 09701c17f3f6..662e68a0e7e6 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -92,9 +92,7 @@
GSWIP_MDIO_PHY_FDUP_MASK)
/* GSWIP MII Registers */
-#define GSWIP_MII_CFG0 0x00
-#define GSWIP_MII_CFG1 0x02
-#define GSWIP_MII_CFG5 0x04
+#define GSWIP_MII_CFGp(p) (0x2 * (p))
#define GSWIP_MII_CFG_EN BIT(14)
#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
#define GSWIP_MII_CFG_MODE_MIIP 0x0
@@ -392,17 +390,9 @@ static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
int port)
{
- switch (port) {
- case 0:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
- break;
- case 1:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
- break;
- case 5:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
- break;
- }
+ /* There's no MII_CFG register for the CPU port */
+ if (!dsa_is_cpu_port(priv->ds, port))
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
}
static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
@@ -822,9 +812,8 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
/* Disable the xMII link */
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);
+ for (i = 0; i < priv->hw_info->max_ports; i++)
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
/* enable special tag insertion on cpu port */
gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@@ -1447,11 +1436,12 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, Pause);
phylink_set(mask, Asym_Pause);
- /* With the exclusion of MII and Reverse MII, we support Gigabit,
- * including Half duplex
+ /* With the exclusion of MII, Reverse MII and Reduced MII, we
+ * support Gigabit, including Half duplex
*/
if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII) {
+ state->interface != PHY_INTERFACE_MODE_REVMII &&
+ state->interface != PHY_INTERFACE_MODE_RMII) {
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseT_Half);
}
@@ -1541,9 +1531,7 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
{
struct gswip_priv *priv = ds->priv;
- /* Enable the xMII interface only for the external PHY */
- if (interface != PHY_INTERFACE_MODE_INTERNAL)
- gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
+ gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}
static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
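
The lantiq_gswip change collapses three fixed MII_CFG register defines and a switch statement into one stride-based macro, GSWIP_MII_CFGp(p), and then iterates over every non-CPU port. A toy sketch of the parameterized-offset pattern follows; the register array, helper, and port count are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define MII_CFG(p)      (0x2 * (p))     /* per-port register stride */
#define MII_CFG_EN      (1u << 14)

static uint32_t regs[16];

static void reg_mask(uint32_t off, uint32_t clear, uint32_t set)
{
        regs[off] = (regs[off] & ~clear) | set;
}

int main(void)
{
        int port;

        /* Disable the xMII link on every port with one loop instead of a
         * dedicated call per hard-coded register name.
         */
        for (port = 0; port < 6; port++)
                reg_mask(MII_CFG(port), MII_CFG_EN, 0);

        printf("port 5 cfg: 0x%x\n", regs[MII_CFG(5)]);
        return 0;
}
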
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 66ddf67b8737..7b96396be609 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -351,6 +351,10 @@ int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
+ err = mv88e6185_g1_stu_data_read(chip, entry);
+ if (err)
+ return err;
+
/* VTU DBNum[3:0] are located in VTU Operation 3:0
* VTU DBNum[5:4] are located in VTU Operation 9:8
*/
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
index efb33c078a3c..cec2018c84a9 100644
--- a/drivers/net/ethernet/aquantia/Kconfig
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -19,7 +19,6 @@ if NET_VENDOR_AQUANTIA
config AQTION
tristate "aQuantia AQtion(tm) Support"
depends on PCI
- depends on X86_64 || ARM64 || COMPILE_TEST
depends on MACSEC || MACSEC=n
help
This enables the support for the aQuantia AQtion(tm) Ethernet card.
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0fdd19d99d99..0404aafd5ce5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2503,8 +2503,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_free_netdev;
+ }
/* Allocate number of TX rings */
priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
@@ -2577,6 +2579,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
NETIF_F_HW_VLAN_CTAG_TX;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ dev->max_mtu = UMAC_MAX_MTU_SIZE;
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = 1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4edd6f8e017e..d10e4f85dd11 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6790,8 +6790,10 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
ctx->tqm_fp_rings_count = bp->max_q;
+ else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
+ ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
- tqm_rings = ctx->tqm_fp_rings_count + 1;
+ tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
if (!ctx_pg) {
kfree(ctx);
@@ -6925,7 +6927,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req.tqm_sp_page_dir,
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
- i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+ i < BNXT_MAX_TQM_RINGS;
+ i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
@@ -12887,10 +12890,10 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
+ pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
int err = 0, off;
- pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -12919,22 +12922,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
err = bnxt_hwrm_func_reset(bp);
- if (!err) {
- err = bnxt_hwrm_func_qcaps(bp);
- if (!err && netif_running(netdev))
- err = bnxt_open(netdev);
- }
- bnxt_ulp_start(bp, err);
- if (!err) {
- bnxt_reenable_sriov(bp);
+ if (!err)
result = PCI_ERS_RESULT_RECOVERED;
- }
- }
-
- if (result != PCI_ERS_RESULT_RECOVERED) {
- if (netif_running(netdev))
- dev_close(netdev);
- pci_disable_device(pdev);
}
rtnl_unlock();
@@ -12952,10 +12941,21 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
static void bnxt_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(netdev);
+ int err;
+ netdev_info(bp->dev, "PCI Slot Resume\n");
rtnl_lock();
- netif_device_attach(netdev);
+ err = bnxt_hwrm_func_qcaps(bp);
+ if (!err && netif_running(netdev))
+ err = bnxt_open(netdev);
+
+ bnxt_ulp_start(bp, err);
+ if (!err) {
+ bnxt_reenable_sriov(bp);
+ netif_device_attach(netdev);
+ }
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 950ea26ae0d2..51996c85547e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1436,6 +1436,11 @@ struct bnxt_ctx_pg_info {
struct bnxt_ctx_pg_info **ctx_pg_tbl;
};
+#define BNXT_MAX_TQM_SP_RINGS 1
+#define BNXT_MAX_TQM_FP_RINGS 8
+#define BNXT_MAX_TQM_RINGS \
+ (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
+
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
@@ -1474,7 +1479,7 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info stat_mem;
struct bnxt_ctx_pg_info mrav_mem;
struct bnxt_ctx_pg_info tim_mem;
- struct bnxt_ctx_pg_info *tqm_mem[9];
+ struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
};
struct bnxt_fw_health {
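
The bnxt.h hunk sizes tqm_mem[] from BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS instead of the literal 9 that the configuration loop used to hard-code, so the array bound and the loop limit can no longer drift apart. A generic sketch of deriving one bound from named sub-limits, with illustrative constants:

#include <stddef.h>

#define MAX_SP_RINGS    1
#define MAX_FP_RINGS    8
#define MAX_RINGS       (MAX_SP_RINGS + MAX_FP_RINGS)

struct ring_info {
        void *pages;
};

struct ctx_mem {
        /* One slot per ring; the bound and the loop below cannot drift
         * apart because both use the same named constant.
         */
        struct ring_info rings[MAX_RINGS];
};

static void init_rings(struct ctx_mem *ctx)
{
        int i;

        for (i = 0; i < MAX_RINGS; i++)
                ctx->rings[i].pages = NULL;
}
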
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9ff79d5d14c4..2f8b193a772d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2532,7 +2532,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
- install.flags |=
+ install.flags =
cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
rc = _hwrm_send_message_silent(bp, &install,
@@ -2546,6 +2546,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
* UPDATE directory and try the flash again
*/
defrag_attempted = true;
+ install.flags = 0;
rc = __bnxt_flash_nvram(bp->dev,
BNX_DIR_TYPE_UPDATE,
BNX_DIR_ORDINAL_FIRST,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 8c8368c2f335..64dbbb04b043 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -222,8 +222,12 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
- return BNXT_MIN_ROCE_STAT_CTXS;
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ struct bnxt_en_dev *edev = bp->edev;
+
+ if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+ return BNXT_MIN_ROCE_STAT_CTXS;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d5d910916c2e..814a5b10141d 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -467,7 +467,7 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
{
long ferr, rate, rate_rounded;
- if (!bp->tx_clk || !(bp->caps & MACB_CAPS_CLK_HW_CHG))
+ if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
return;
switch (speed) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
index 92473dda55d9..22a0220123ad 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -40,6 +40,13 @@
#define TCB_L2T_IX_M 0xfffULL
#define TCB_L2T_IX_V(x) ((x) << TCB_L2T_IX_S)
+#define TCB_T_FLAGS_W 1
+#define TCB_T_FLAGS_S 0
+#define TCB_T_FLAGS_M 0xffffffffffffffffULL
+#define TCB_T_FLAGS_V(x) ((__u64)(x) << TCB_T_FLAGS_S)
+
+#define TCB_FIELD_COOKIE_TFLAG 1
+
#define TCB_SMAC_SEL_W 0
#define TCB_SMAC_SEL_S 24
#define TCB_SMAC_SEL_M 0xffULL
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 72bb123d53db..9e2378013642 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -575,7 +575,11 @@ int send_tx_flowc_wr(struct sock *sk, int compl,
void chtls_tcp_push(struct sock *sk, int flags);
int chtls_push_frames(struct chtls_sock *csk, int comp);
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
+void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
+ u64 mask, u64 val, u8 cookie,
+ int through_l2t);
int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode, int cipher_type);
+void chtls_set_quiesce_ctrl(struct sock *sk, int val);
void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
unsigned int keyid_to_addr(int start_addr, int keyid);
void free_tls_keyid(struct sock *sk);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index a0e0d8a83681..e5cfbe196ba6 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -32,6 +32,7 @@
#include "chtls.h"
#include "chtls_cm.h"
#include "clip_tbl.h"
+#include "t4_tcb.h"
/*
* State transitions and actions for close. Note that if we are in SYN_SENT
@@ -267,7 +268,9 @@ static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
if (sk->sk_state != TCP_SYN_RECV)
chtls_send_abort(sk, mode, skb);
else
- goto out;
+ chtls_set_tcb_field_rpl_skb(sk, TCB_T_FLAGS_W,
+ TCB_T_FLAGS_V(TCB_T_FLAGS_M), 0,
+ TCB_FIELD_COOKIE_TFLAG, 1);
return;
out:
@@ -621,7 +624,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
while (!skb_queue_empty(&listen_ctx->synq)) {
struct chtls_sock *csk =
- container_of((struct synq *)__skb_dequeue
+ container_of((struct synq *)skb_peek
(&listen_ctx->synq), struct chtls_sock, synq);
struct sock *child = csk->sk;
@@ -1109,6 +1112,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
const struct cpl_pass_accept_req *req,
struct chtls_dev *cdev)
{
+ struct adapter *adap = pci_get_drvdata(cdev->pdev);
struct neighbour *n = NULL;
struct inet_sock *newinet;
const struct iphdr *iph;
@@ -1118,9 +1122,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
struct dst_entry *dst;
struct tcp_sock *tp;
struct sock *newsk;
+ bool found = false;
u16 port_id;
int rxq_idx;
- int step;
+ int step, i;
iph = (const struct iphdr *)network_hdr;
newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
@@ -1152,7 +1157,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
n = dst_neigh_lookup(dst, &ip6h->saddr);
#endif
}
- if (!n)
+ if (!n || !n->dev)
goto free_sk;
ndev = n->dev;
@@ -1161,6 +1166,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
if (is_vlan_dev(ndev))
ndev = vlan_dev_real_dev(ndev);
+ for_each_port(adap, i)
+ if (cdev->ports[i] == ndev)
+ found = true;
+
+ if (!found)
+ goto free_dst;
+
port_id = cxgb4_port_idx(ndev);
csk = chtls_sock_create(cdev);
@@ -1238,6 +1250,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
free_csk:
chtls_sock_release(&csk->kref);
free_dst:
+ neigh_release(n);
dst_release(dst);
free_sk:
inet_csk_prepare_forced_close(newsk);
@@ -1387,7 +1400,7 @@ static void chtls_pass_accept_request(struct sock *sk,
newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
if (!newsk)
- goto free_oreq;
+ goto reject;
if (chtls_get_module(newsk))
goto reject;
@@ -1403,8 +1416,6 @@ static void chtls_pass_accept_request(struct sock *sk,
kfree_skb(skb);
return;
-free_oreq:
- chtls_reqsk_free(oreq);
reject:
mk_tid_release(reply_skb, 0, tid);
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
@@ -1589,6 +1600,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
sk_wake_async(sk, 0, POLL_OUT);
data = lookup_stid(cdev->tids, stid);
+ if (!data) {
+ /* listening server close */
+ kfree_skb(skb);
+ goto unlock;
+ }
lsk = ((struct listen_ctx *)data)->lsk;
bh_lock_sock(lsk);
@@ -1936,6 +1952,8 @@ static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
else if (tcp_sk(sk)->linger2 < 0 &&
!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
chtls_abort_conn(sk, skb);
+ else if (csk_flag_nochk(csk, CSK_TX_DATA_SENT))
+ chtls_set_quiesce_ctrl(sk, 0);
break;
default:
pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
@@ -1997,39 +2015,6 @@ static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
spin_unlock_bh(&cdev->deferq.lock);
}
-static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
- struct chtls_dev *cdev, int status, int queue)
-{
- struct cpl_abort_req_rss *req = cplhdr(skb);
- struct sk_buff *reply_skb;
- struct chtls_sock *csk;
-
- csk = rcu_dereference_sk_user_data(sk);
-
- reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
- GFP_KERNEL);
-
- if (!reply_skb) {
- req->status = (queue << 1);
- t4_defer_reply(skb, cdev, send_defer_abort_rpl);
- return;
- }
-
- set_abort_rpl_wr(reply_skb, GET_TID(req), status);
- kfree_skb(skb);
-
- set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
- if (csk_conn_inline(csk)) {
- struct l2t_entry *e = csk->l2t_entry;
-
- if (e && sk->sk_state != TCP_SYN_RECV) {
- cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
- return;
- }
- }
- cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
-}
-
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev,
int status, int queue)
@@ -2078,9 +2063,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
queue = csk->txq_idx;
skb->sk = NULL;
+ chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
+ CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(child, lsk);
- send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
- CPL_ABORT_NO_RST, queue);
}
static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
@@ -2110,8 +2095,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
if (!sock_owned_by_user(psk)) {
int queue = csk->txq_idx;
+ chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(sk, psk);
- send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
} else {
skb->sk = sk;
BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
@@ -2129,9 +2114,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
int queue = csk->txq_idx;
if (is_neg_adv(req->status)) {
- if (sk->sk_state == TCP_SYN_RECV)
- chtls_set_tcb_tflag(sk, 0, 0);
-
kfree_skb(skb);
return;
}
@@ -2158,12 +2140,12 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
return;
- chtls_release_resources(sk);
- chtls_conn_done(sk);
}
chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
rst_status, queue);
+ chtls_release_resources(sk);
+ chtls_conn_done(sk);
}
static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
@@ -2315,6 +2297,28 @@ static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
return 0;
}
+static int chtls_set_tcb_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+ struct cpl_set_tcb_rpl *rpl = cplhdr(skb) + RSS_HDR;
+ unsigned int hwtid = GET_TID(rpl);
+ struct sock *sk;
+
+ sk = lookup_tid(cdev->tids, hwtid);
+
+ /* return EINVAL if socket doesn't exist */
+ if (!sk)
+ return -EINVAL;
+
+ /* Reusing the skb as size of cpl_set_tcb_field structure
+ * is greater than cpl_abort_req
+ */
+ if (TCB_COOKIE_G(rpl->cookie) == TCB_FIELD_COOKIE_TFLAG)
+ chtls_send_abort(sk, CPL_ABORT_SEND_RST, NULL);
+
+ kfree_skb(skb);
+ return 0;
+}
+
chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
[CPL_PASS_OPEN_RPL] = chtls_pass_open_rpl,
[CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl,
@@ -2327,5 +2331,6 @@ chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
[CPL_CLOSE_CON_RPL] = chtls_conn_cpl,
[CPL_ABORT_REQ_RSS] = chtls_conn_cpl,
[CPL_ABORT_RPL_RSS] = chtls_conn_cpl,
- [CPL_FW4_ACK] = chtls_wr_ack,
+ [CPL_FW4_ACK] = chtls_wr_ack,
+ [CPL_SET_TCB_RPL] = chtls_set_tcb_rpl,
};
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
index a4fb463af22a..1e67140b0f80 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
@@ -88,6 +88,24 @@ static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
return ret < 0 ? ret : 0;
}
+void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
+ u64 mask, u64 val, u8 cookie,
+ int through_l2t)
+{
+ struct sk_buff *skb;
+ unsigned int wrlen;
+
+ wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
+ wrlen = roundup(wrlen, 16);
+
+ skb = alloc_skb(wrlen, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return;
+
+ __set_tcb_field(sk, skb, word, mask, val, cookie, 0);
+ send_or_defer(sk, tcp_sk(sk), skb, through_l2t);
+}
+
/*
* Set one of the t_flags bits in the TCB.
*/
@@ -113,6 +131,29 @@ static int chtls_set_tcb_quiesce(struct sock *sk, int val)
TF_RX_QUIESCE_V(val));
}
+void chtls_set_quiesce_ctrl(struct sock *sk, int val)
+{
+ struct chtls_sock *csk;
+ struct sk_buff *skb;
+ unsigned int wrlen;
+ int ret;
+
+ wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
+ wrlen = roundup(wrlen, 16);
+
+ skb = alloc_skb(wrlen, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ csk = rcu_dereference_sk_user_data(sk);
+
+ __set_tcb_field(sk, skb, 1, TF_RX_QUIESCE_V(1), 0, 0, 1);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
+ ret = cxgb4_ofld_send(csk->egress_dev, skb);
+ if (ret < 0)
+ kfree_skb(skb);
+}
+
/* TLS Key bitmap processing */
int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
{
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 0981fe9652e5..3d9b0b161e24 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1211,7 +1211,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
- goto free2;
+ goto free3;
}
ret = ethoc_mdio_probe(netdev);
@@ -1243,6 +1243,7 @@ error2:
netif_napi_del(&priv->napi);
error:
mdiobus_unregister(priv->mdio);
+free3:
mdiobus_free(priv->mdio);
free2:
clk_disable_unprepare(priv->clk);
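
The ethoc fix adds a free3 label so a failed mdiobus_register() unwinds through mdiobus_free() rather than jumping past it. The underlying goto-unwind shape, as a self-contained sketch with invented allocation and registration helpers:

#include <stdlib.h>

struct bus {
        int id;
};

static struct bus *bus_alloc(void)        { return malloc(sizeof(struct bus)); }
static int bus_register(struct bus *b)    { return b ? 0 : -1; }
static int setup_extra(struct bus *b)     { return b ? 0 : -1; }
static void bus_unregister(struct bus *b) { (void)b; }
static void bus_free(struct bus *b)       { free(b); }

static int probe(void)
{
        struct bus *b;
        int ret;

        b = bus_alloc();
        if (!b)
                return -1;

        ret = bus_register(b);
        if (ret)
                goto free_bus;          /* undo only what succeeded so far */

        ret = setup_extra(b);
        if (ret)
                goto unregister_bus;    /* later failure unwinds both steps */

        return 0;

unregister_bus:
        bus_unregister(b);
free_bus:
        bus_free(b);
        return ret;
}
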
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index c8e5d889bd81..21de56345503 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -223,3 +223,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
};
module_platform_driver(fs_enet_bb_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 8b51ee142fa3..152f4d83765a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -224,3 +224,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
};
module_platform_driver(fs_enet_fec_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index ba8869c3d891..6d853f018d53 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3889,6 +3889,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
dev->mtu = 1500;
+ dev->max_mtu = 1518;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
ugeth->phy_interface = phy_interface;
@@ -3934,12 +3935,12 @@ static int ucc_geth_remove(struct platform_device* ofdev)
struct device_node *np = ofdev->dev.of_node;
unregister_netdev(dev);
- free_netdev(dev);
ucc_geth_memclean(ugeth);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(ugeth->ug_info->tbi_node);
of_node_put(ugeth->ug_info->phy_node);
+ free_netdev(dev);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 1a9bdf66a7d8..11d4bf5dc21f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -575,7 +575,14 @@ struct ucc_geth_tx_global_pram {
u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
u32 tqptr; /* a base pointer to the Tx Queues Memory
Region */
- u8 res2[0x80 - 0x74];
+ u8 res2[0x78 - 0x74];
+ u64 snums_en;
+ u32 l2l3baseptr; /* top byte consists of a few other bit fields */
+
+ u16 mtu[8];
+ u8 res3[0xa8 - 0x94];
+ u32 wrrtablebase; /* top byte is reserved */
+ u8 res4[0xc0 - 0xac];
} __packed;
/* structure representing Extended Filtering Global Parameters in PRAM */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 7165da0ee9aa..a6e3f07caf99 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -415,6 +415,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
/* for mutl buffer*/
new_skb = skb_copy(skb, GFP_ATOMIC);
dev_kfree_skb_any(skb);
+ if (!new_skb) {
+ netdev_err(ndev, "skb alloc failed\n");
+ return;
+ }
skb = new_skb;
check_ok = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index fb5e8842983c..33defa4c180a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -169,7 +169,7 @@ struct hclgevf_mbx_arq_ring {
#define hclge_mbx_ring_ptr_move_crq(crq) \
(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
#define hclge_mbx_tail_ptr_move_arq(arq) \
- (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+ (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#define hclge_mbx_head_ptr_move_arq(arq) \
- (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+ (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#endif
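
The hclge_mbx.h fix wraps the ARQ head and tail indices with the message-slot count rather than the per-message size, which are distinct constants. Below is a minimal sketch of why the ring modulus must be the slot count; the values are illustrative, not the driver's.

#define MSG_SIZE        8       /* 16-bit words per message */
#define MSG_NUM         1024    /* message slots in the ring */

struct arq {
        unsigned int head;
        unsigned int tail;
        unsigned short msg[MSG_NUM][MSG_SIZE];
};

/* Advance a ring index: the modulus must be the slot count, not the size
 * of one slot, or the index wraps at the wrong point and walks off the
 * ring.
 */
static void tail_move(struct arq *q)
{
        q->tail = (q->tail + 1) % MSG_NUM;
}
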
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index e6f37f91c489..c242883fea5d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -752,7 +752,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
- if (hdev->hw.mac.phydev) {
+ if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+ hdev->hw.mac.phydev->drv->set_loopback) {
count += 1;
handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
}
@@ -4537,8 +4538,8 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
@@ -4730,6 +4731,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
vport[i].rss_tuple_sets.ipv6_udp_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
vport[i].rss_tuple_sets.ipv6_sctp_en =
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
HCLGE_RSS_INPUT_TUPLE_SCTP;
vport[i].rss_tuple_sets.ipv6_fragment_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 50a294dfaff5..ca46bc9110d7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -107,6 +107,8 @@
#define HCLGE_D_IP_BIT BIT(2)
#define HCLGE_S_IP_BIT BIT(3)
#define HCLGE_V_TAG_BIT BIT(4)
+#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 145757cb70f9..674b3a22e91f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -917,8 +917,8 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
@@ -2502,7 +2502,10 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ tuple_sets->ipv6_sctp_en =
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP;
tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 1b183bc35604..f6d817a3edcb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -122,6 +122,8 @@
#define HCLGEVF_D_IP_BIT BIT(2)
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index f302504faa8a..9778c83150f1 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -955,6 +955,7 @@ static void release_resources(struct ibmvnic_adapter *adapter)
release_rx_pools(adapter);
release_napi(adapter);
+ release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
}
@@ -2341,8 +2342,7 @@ static void __ibmvnic_reset(struct work_struct *work)
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(60 * HZ);
}
- } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
- adapter->from_passive_init)) {
+ } else {
rc = do_reset(adapter, rwi, reset_state);
}
kfree(rwi);
@@ -2981,9 +2981,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
int rc;
if (!scrq) {
- netdev_dbg(adapter->netdev,
- "Invalid scrq reset. irq (%d) or msgs (%p).\n",
- scrq->irq, scrq->msgs);
+ netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
return -EINVAL;
}
@@ -3873,7 +3871,9 @@ static int send_login(struct ibmvnic_adapter *adapter)
return -1;
}
+ release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
+
client_data_len = vnic_client_data_len(adapter);
buffer_size =
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ba7a0f8f6937..5b2143f4b1f8 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -436,6 +436,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
+#define FLAG2_ENABLE_S0IX_FLOWS BIT(15)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 03215b0aee4b..06442e6bef73 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -23,6 +23,13 @@ struct e1000_stats {
int stat_offset;
};
+static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define E1000E_PRIV_FLAGS_S0IX_ENABLED BIT(0)
+ "s0ix-enabled",
+};
+
+#define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings)
+
#define E1000_STAT(str, m) { \
.stat_string = str, \
.type = E1000_STATS, \
@@ -1776,6 +1783,8 @@ static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
return E1000_TEST_LEN;
case ETH_SS_STATS:
return E1000_STATS_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return E1000E_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -2097,6 +2106,10 @@ static void e1000_get_strings(struct net_device __always_unused *netdev,
p += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, e1000e_priv_flags_strings,
+ E1000E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
}
}
@@ -2305,6 +2318,37 @@ static int e1000e_get_ts_info(struct net_device *netdev,
return 0;
}
+static u32 e1000e_get_priv_flags(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+ priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED;
+
+ return priv_flags;
+}
+
+static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags2 = adapter->flags2;
+
+ flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS;
+ if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (hw->mac.type < e1000_pch_cnp)
+ return -EINVAL;
+ flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+ }
+
+ if (flags2 != adapter->flags2)
+ adapter->flags2 = flags2;
+
+ return 0;
+}
+
static const struct ethtool_ops e1000_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
@@ -2336,6 +2380,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.set_eee = e1000e_set_eee,
.get_link_ksettings = e1000_get_link_ksettings,
.set_link_ksettings = e1000_set_link_ksettings,
+ .get_priv_flags = e1000e_get_priv_flags,
+ .set_priv_flags = e1000e_set_priv_flags,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
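
The e1000e ethtool hunks gate the S0ix entry/exit flows behind an ethtool private flag: a bit-indexed string table, a getter that mirrors the adapter flag into a bitmask, and a setter that rejects the bit on hardware that cannot support it. A stripped-down sketch of that plumbing follows; the field names and the capability check are stand-ins, not e1000e's.

#define FLAG_S0IX       (1u << 0)

struct adapter {
        unsigned int flags;
        int hw_gen;
};

static unsigned int get_priv_flags(const struct adapter *a)
{
        unsigned int out = 0;

        if (a->flags & FLAG_S0IX)
                out |= 1u << 0;         /* mirror driver flag into ethtool bit */
        return out;
}

static int set_priv_flags(struct adapter *a, unsigned int req)
{
        unsigned int flags = a->flags & ~FLAG_S0IX;

        if (req & (1u << 0)) {
                if (a->hw_gen < 9)      /* reject on parts without support */
                        return -1;
                flags |= FLAG_S0IX;
        }
        a->flags = flags;
        return 0;
}
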
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9aa6fad8ed47..6fb46682b058 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1240,6 +1240,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
return 0;
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ struct e1000_adapter *adapter = hw->adapter;
+ bool firmware_bug = false;
+
if (force) {
/* Request ME un-configure ULP mode in the PHY */
mac_reg = er32(H2ME);
@@ -1248,16 +1251,24 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
ew32(H2ME, mac_reg);
}
- /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
+ /* Poll up to 2.5 seconds for ME to clear ULP_CFG_DONE.
+ * If this takes more than 1 second, show a warning indicating a
+ * firmware bug
+ */
while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
- if (i++ == 30) {
+ if (i++ == 250) {
ret_val = -E1000_ERR_PHY;
goto out;
}
+ if (i > 100 && !firmware_bug)
+ firmware_bug = true;
usleep_range(10000, 11000);
}
- e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+ if (firmware_bug)
+ e_warn("ULP_CONFIG_DONE took %dmsec. This is a firmware bug\n", i * 10);
+ else
+ e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
if (force) {
mac_reg = er32(H2ME);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 128ab6898070..e9b82c209c2d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -103,45 +103,6 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{0, NULL}
};
-struct e1000e_me_supported {
- u16 device_id; /* supported device ID */
-};
-
-static const struct e1000e_me_supported me_supported[] = {
- {E1000_DEV_ID_PCH_LPT_I217_LM},
- {E1000_DEV_ID_PCH_LPTLP_I218_LM},
- {E1000_DEV_ID_PCH_I218_LM2},
- {E1000_DEV_ID_PCH_I218_LM3},
- {E1000_DEV_ID_PCH_SPT_I219_LM},
- {E1000_DEV_ID_PCH_SPT_I219_LM2},
- {E1000_DEV_ID_PCH_LBG_I219_LM3},
- {E1000_DEV_ID_PCH_SPT_I219_LM4},
- {E1000_DEV_ID_PCH_SPT_I219_LM5},
- {E1000_DEV_ID_PCH_CNP_I219_LM6},
- {E1000_DEV_ID_PCH_CNP_I219_LM7},
- {E1000_DEV_ID_PCH_ICP_I219_LM8},
- {E1000_DEV_ID_PCH_ICP_I219_LM9},
- {E1000_DEV_ID_PCH_CMP_I219_LM10},
- {E1000_DEV_ID_PCH_CMP_I219_LM11},
- {E1000_DEV_ID_PCH_CMP_I219_LM12},
- {E1000_DEV_ID_PCH_TGP_I219_LM13},
- {E1000_DEV_ID_PCH_TGP_I219_LM14},
- {E1000_DEV_ID_PCH_TGP_I219_LM15},
- {0}
-};
-
-static bool e1000e_check_me(u16 device_id)
-{
- struct e1000e_me_supported *id;
-
- for (id = (struct e1000e_me_supported *)me_supported;
- id->device_id; id++)
- if (device_id == id->device_id)
- return true;
-
- return false;
-}
-
/**
* __ew32_prepare - prepare to write to MAC CSR register on certain parts
* @hw: pointer to the HW structure
@@ -6962,7 +6923,6 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
int rc;
e1000e_flush_lpic(pdev);
@@ -6970,13 +6930,13 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
e1000e_pm_freeze(dev);
rc = __e1000_shutdown(pdev, false);
- if (rc)
+ if (rc) {
e1000e_pm_thaw(dev);
-
- /* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp &&
- !e1000e_check_me(hw->adapter->pdev->device))
- e1000e_s0ix_entry_flow(adapter);
+ } else {
+ /* Introduce S0ix implementation */
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+ e1000e_s0ix_entry_flow(adapter);
+ }
return rc;
}
@@ -6986,12 +6946,10 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
int rc;
/* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp &&
- !e1000e_check_me(hw->adapter->pdev->device))
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
e1000e_s0ix_exit_flow(adapter);
rc = __e1000_resume(pdev);
@@ -7655,6 +7613,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_get_hw_control(adapter);
+ if (hw->mac.type >= e1000_pch_cnp)
+ adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+
strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
err = register_netdev(netdev);
if (err)
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d231a2cdd98f..118473dfdcbd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -120,6 +120,7 @@ enum i40e_state_t {
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
__I40E_PF_RESET_REQUESTED,
+ __I40E_PF_RESET_AND_REBUILD_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_INTR_RECEIVED,
@@ -146,6 +147,8 @@ enum i40e_state_t {
};
#define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED)
+#define I40E_PF_RESET_AND_REBUILD_FLAG \
+ BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
/* VSI state flags */
enum i40e_vsi_state_t {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1337686bd099..1db482d310c2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -36,6 +36,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
@@ -8536,6 +8538,14 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
"FW LLDP is disabled\n" :
"FW LLDP is enabled\n");
+ } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+ /* Request a PF Reset
+ *
+ * Resets PF and reinitializes PFs VSI.
+ */
+ i40e_prep_for_reset(pf, lock_acquired);
+ i40e_reset_and_rebuild(pf, true, lock_acquired);
+
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 729c4f0d5ac5..21ee56420c3a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1772,7 +1772,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
}
ret = i40e_pci_sriov_enable(pdev, num_vfs);
goto sriov_configure_out;
@@ -1781,7 +1781,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
ret = -EINVAL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 47eb9c584a12..492ce213208d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -348,12 +348,12 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
* SBP is *not* set in PRT_SBPVSI (default not set).
*/
skb = i40e_construct_skb_zc(rx_ring, *bi);
- *bi = NULL;
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
break;
}
+ *bi = NULL;
cleaned_count++;
i40e_inc_ntc(rx_ring);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 95543dfd4fe7..0a867d64d467 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1834,11 +1834,9 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
netif_tx_stop_all_queues(netdev);
if (CLIENT_ALLOWED(adapter)) {
err = iavf_lan_add_device(adapter);
- if (err) {
- rtnl_unlock();
+ if (err)
dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
err);
- }
}
dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 563ceac3060f..bc4d8d144401 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4432,7 +4432,7 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
struct bpf_prog *old_prog;
if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
- NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
return -EOPNOTSUPP;
}
@@ -5255,7 +5255,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = mvneta_port_power_up(pp, pp->phy_interface);
if (err < 0) {
dev_err(&pdev->dev, "can't power up port\n");
- return err;
+ goto err_netdev;
}
/* Armada3700 network controller does not support per-cpu
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index afdd22827223..358119d98358 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1231,7 +1231,7 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
if (port->gop_id == 2)
- val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
+ val |= GENCONF_CTRL0_PORT0_RGMII;
else if (port->gop_id == 3)
val |= GENCONF_CTRL0_PORT1_RGMII_MII;
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
@@ -2370,17 +2370,18 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
{
- unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ unsigned int thread;
u32 val;
if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
-
- put_cpu();
+ /* PKT-coalescing registers are per-queue + per-thread */
+ for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
+ }
}
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -5487,7 +5488,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
struct mvpp2 *priv = port->priv;
struct mvpp2_txq_pcpu *txq_pcpu;
unsigned int thread;
- int queue, err;
+ int queue, err, val;
/* Checks for hardware constraints */
if (port->first_rxq + port->nrxqs >
@@ -5501,6 +5502,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_egress_disable(port);
mvpp2_port_disable(port);
+ if (mvpp2_is_xlg(port->phy_interface)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+ val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ }
+
port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
@@ -5869,8 +5882,6 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, Autoneg);
phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
switch (state->interface) {
case PHY_INTERFACE_MODE_10GBASER:
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 5692c6087bbb..a30eb90ba3d2 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -405,6 +405,38 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
return -EINVAL;
}
+/* Drop flow control pause frames */
+static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
+{
+ unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
+ struct mvpp2_prs_entry pe;
+ unsigned int len;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* For all ports - drop flow control frames */
+ pe.index = MVPP2_PE_FC_DROP;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ /* Set match on DA */
+ len = ETH_ALEN;
+ while (len--)
+ mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
@@ -1162,6 +1194,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Create dummy entries for drop all and promiscuous modes */
+ mvpp2_prs_drop_fc(priv);
mvpp2_prs_mac_drop_all_set(priv, 0, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
@@ -1647,8 +1680,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IPv6 header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ /* Jump to DIP of IPV6 header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+ MVPP2_MAX_L3_ADDR_SIZE,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
index e22f6c85d380..4b68dd374733 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -129,7 +129,7 @@
#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
-/* reserved */
+#define MVPP2_PE_FC_DROP (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 7d0f96290943..1a8f5a039d50 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -871,8 +871,10 @@ static int cgx_lmac_init(struct cgx *cgx)
if (!lmac)
return -ENOMEM;
lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
- if (!lmac->name)
- return -ENOMEM;
+ if (!lmac->name) {
+ err = -ENOMEM;
+ goto err_lmac_free;
+ }
sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
lmac->lmac_id = i;
lmac->cgx = cgx;
@@ -883,7 +885,7 @@ static int cgx_lmac_init(struct cgx *cgx)
CGX_LMAC_FWI + i * 9),
cgx_fwi_event_handler, 0, lmac->name, lmac);
if (err)
- return err;
+ goto err_irq;
/* Enable interrupt */
cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
@@ -895,6 +897,12 @@ static int cgx_lmac_init(struct cgx *cgx)
}
return cgx_lmac_verify_fwi_version(cgx);
+
+err_irq:
+ kfree(lmac->name);
+err_lmac_free:
+ kfree(lmac);
+ return err;
}
static int cgx_lmac_exit(struct cgx *cgx)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index d298b9357177..6c6b411e78fd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -469,6 +469,9 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
@@ -485,6 +488,9 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
int rc = 0, i;
u64 cfg;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rsp->hdr.rc = rc;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index d29af7b9c695..76177f7c5ec2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -626,6 +626,11 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
if (!reg_c0)
return true;
+ /* If reg_c0 is not equal to the default flow tag then skb->mark
+ * is not supported and must be reset back to 0.
+ */
+ skb->mark = 0;
+
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index e521254d886e..072363e73f1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -118,16 +118,17 @@ struct mlx5_ct_tuple {
u16 zone;
};
-struct mlx5_ct_shared_counter {
+struct mlx5_ct_counter {
struct mlx5_fc *counter;
refcount_t refcount;
+ bool is_shared;
};
struct mlx5_ct_entry {
struct rhash_head node;
struct rhash_head tuple_node;
struct rhash_head tuple_nat_node;
- struct mlx5_ct_shared_counter *shared_counter;
+ struct mlx5_ct_counter *counter;
unsigned long cookie;
unsigned long restore_cookie;
struct mlx5_ct_tuple tuple;
@@ -394,13 +395,14 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
}
static void
-mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
+mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
{
- if (!refcount_dec_and_test(&entry->shared_counter->refcount))
+ if (entry->counter->is_shared &&
+ !refcount_dec_and_test(&entry->counter->refcount))
return;
- mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
- kfree(entry->shared_counter);
+ mlx5_fc_destroy(ct_priv->dev, entry->counter->counter);
+ kfree(entry->counter);
}
static void
@@ -699,7 +701,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
attr->dest_ft = ct_priv->post_ct;
attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
attr->outer_match_level = MLX5_MATCH_L4;
- attr->counter = entry->shared_counter->counter;
+ attr->counter = entry->counter->counter;
attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
@@ -732,13 +734,34 @@ err_attr:
return err;
}
-static struct mlx5_ct_shared_counter *
+static struct mlx5_ct_counter *
+mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
+{
+ struct mlx5_ct_counter *counter;
+ int ret;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+
+ counter->is_shared = false;
+ counter->counter = mlx5_fc_create(ct_priv->dev, true);
+ if (IS_ERR(counter->counter)) {
+ ct_dbg("Failed to create counter for ct entry");
+ ret = PTR_ERR(counter->counter);
+ kfree(counter);
+ return ERR_PTR(ret);
+ }
+
+ return counter;
+}
+
+static struct mlx5_ct_counter *
mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry)
{
struct mlx5_ct_tuple rev_tuple = entry->tuple;
- struct mlx5_ct_shared_counter *shared_counter;
- struct mlx5_core_dev *dev = ct_priv->dev;
+ struct mlx5_ct_counter *shared_counter;
struct mlx5_ct_entry *rev_entry;
__be16 tmp_port;
int ret;
@@ -767,25 +790,20 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
tuples_ht_params);
if (rev_entry) {
- if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
+ if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
mutex_unlock(&ct_priv->shared_counter_lock);
- return rev_entry->shared_counter;
+ return rev_entry->counter;
}
}
mutex_unlock(&ct_priv->shared_counter_lock);
- shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
- if (!shared_counter)
- return ERR_PTR(-ENOMEM);
-
- shared_counter->counter = mlx5_fc_create(dev, true);
- if (IS_ERR(shared_counter->counter)) {
- ct_dbg("Failed to create counter for ct entry");
- ret = PTR_ERR(shared_counter->counter);
- kfree(shared_counter);
+ shared_counter = mlx5_tc_ct_counter_create(ct_priv);
+ if (IS_ERR(shared_counter)) {
+ ret = PTR_ERR(shared_counter);
return ERR_PTR(ret);
}
+ shared_counter->is_shared = true;
refcount_set(&shared_counter->refcount, 1);
return shared_counter;
}
@@ -798,10 +816,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
{
int err;
- entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
- if (IS_ERR(entry->shared_counter)) {
- err = PTR_ERR(entry->shared_counter);
- ct_dbg("Failed to create counter for ct entry");
+ if (nf_ct_acct_enabled(dev_net(ct_priv->netdev)))
+ entry->counter = mlx5_tc_ct_counter_create(ct_priv);
+ else
+ entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
+
+ if (IS_ERR(entry->counter)) {
+ err = PTR_ERR(entry->counter);
return err;
}
@@ -820,7 +841,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
err_nat:
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
- mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+ mlx5_tc_ct_counter_put(ct_priv, entry);
return err;
}
@@ -918,7 +939,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
tuples_ht_params);
mutex_unlock(&ct_priv->shared_counter_lock);
- mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+ mlx5_tc_ct_counter_put(ct_priv, entry);
}
@@ -956,7 +977,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
if (!entry)
return -ENOENT;
- mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
+ mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 7943eb30b837..4880f2179273 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -371,6 +371,15 @@ struct mlx5e_swp_spec {
u8 tun_l4_proto;
};
+static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
+{
+ /* SWP offsets are in 2-byte words */
+ eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
+ eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
+ eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
+ eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
+}
+
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
struct mlx5e_swp_spec *swp_spec)
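The helper added in the hunk above bumps every software-parser (SWP) offset by VLAN_HLEN / 2 because the offsets are counted in 2-byte words, so an inlined 4-byte VLAN tag moves each inner header two words deeper. Below is a minimal standalone sketch of that arithmetic; it is an editorial illustration with invented values, not part of the patch.

/* Illustrative sketch (not part of the patch): why each SWP offset grows by
 * VLAN_HLEN / 2. Offsets are expressed in 2-byte words; an inserted 802.1Q
 * tag (4 bytes) pushes every subsequent header 4 / 2 = 2 words deeper.
 * Names and values here are local stand-ins, not the driver's structures.
 */
#include <stdio.h>

#define VLAN_HLEN 4  /* bytes added by one 802.1Q tag */

int main(void)
{
	unsigned int outer_l3_words = 7;                 /* e.g. 14-byte MAC header / 2 */
	unsigned int outer_l3_bytes = outer_l3_words * 2;

	/* After VLAN insertion the L3 header starts VLAN_HLEN bytes later,
	 * i.e. VLAN_HLEN / 2 words later.
	 */
	outer_l3_words += VLAN_HLEN / 2;

	printf("offset: %u bytes -> %u bytes (%u words)\n",
	       outer_l3_bytes, outer_l3_words * 2, outer_l3_words);
	return 0;
}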
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 899b98aca0d3..1fae7fab8297 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -51,7 +51,7 @@ static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
}
static inline void
-mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
struct mlx5e_swp_spec swp_spec = {};
unsigned int offset = 0;
@@ -85,6 +85,8 @@ mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
}
mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+ if (skb_vlan_tag_present(skb) && ihs)
+ mlx5e_eseg_swp_offsets_add_vlan(eseg);
}
#else
@@ -163,7 +165,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
- struct mlx5_wqe_eth_seg *eseg)
+ struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
if (xfrm_offload(skb))
@@ -172,7 +174,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
#if IS_ENABLED(CONFIG_GENEVE)
if (skb->encapsulation)
- mlx5e_tx_tunnel_accel(skb, eseg);
+ mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d9076d543104..2d37742a888c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1010,6 +1010,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}
+static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
+ const unsigned long link_modes, u8 autoneg)
+{
+ /* Extended link-mode has no speed limitations. */
+ if (ext)
+ return 0;
+
+ if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+ autoneg != AUTONEG_ENABLE) {
+ netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
@@ -1103,13 +1119,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
mlx5e_port_speed2linkmodes(mdev, speed, !ext);
- if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
- autoneg != AUTONEG_ENABLE) {
- netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
- __func__);
- err = -EINVAL;
+ err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
+ if (err)
goto out;
- }
link_modes = link_modes & eproto.cap;
if (!link_modes) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index fa8149f6eb08..e02e5895703d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -942,6 +942,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
+ ft->g = NULL;
return -ENOMEM;
}
@@ -1087,6 +1088,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
+ ft->g = NULL;
return -ENOMEM;
}
@@ -1390,6 +1392,7 @@ err_destroy_groups:
ft->g[ft->num_groups] = NULL;
mlx5e_destroy_groups(ft);
kvfree(in);
+ kfree(ft->g);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7a79d330c075..6a852b4901aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3161,7 +3161,8 @@ static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
mlx5_set_port_admin_status(mdev, state);
- if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY)
+ if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
+ !MLX5_CAP_GEN(mdev, uplink_follow))
return;
if (state == MLX5_PORT_UP)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index e47e2a0059d0..61ed671fe741 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -682,9 +682,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
- struct mlx5_wqe_eth_seg *eseg)
+ struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
- if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
+ if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
return false;
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
@@ -714,7 +714,8 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
struct mlx5_wqe_eth_seg eseg = {};
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
+ attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
@@ -731,7 +732,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
/* May update the WQE, but may not post other WQEs. */
mlx5e_accel_tx_finish(sq, wqe, &accel,
(struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 2b85d4777303..3e19b1721303 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -95,22 +95,21 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
return 0;
}
- if (!IS_ERR_OR_NULL(vport->egress.acl))
- return 0;
-
- vport->egress.acl = esw_acl_table_create(esw, vport->vport,
- MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- table_size);
- if (IS_ERR(vport->egress.acl)) {
- err = PTR_ERR(vport->egress.acl);
- vport->egress.acl = NULL;
- goto out;
+ if (!vport->egress.acl) {
+ vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ table_size);
+ if (IS_ERR(vport->egress.acl)) {
+ err = PTR_ERR(vport->egress.acl);
+ vport->egress.acl = NULL;
+ goto out;
+ }
+
+ err = esw_acl_egress_lgcy_groups_create(esw, vport);
+ if (err)
+ goto out;
}
- err = esw_acl_egress_lgcy_groups_create(esw, vport);
- if (err)
- goto out;
-
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index f3d45ef082cd..83a05371e2aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -564,7 +564,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
struct mlx5_core_dev *tmp_dev;
int i, err;
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+ !MLX5_CAP_GEN(dev, lag_master) ||
+ MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
return;
tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -582,12 +584,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
return;
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
- tmp_dev = ldev->pf[i].dev;
- if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
- MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (!ldev->pf[i].dev)
break;
- }
if (i >= MLX5_MAX_PORTS)
ldev->flags |= MLX5_LAG_FLAG_READY;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c08315b51fd3..ca6f2fc39ea0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1368,8 +1368,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
MLX5_COREDEV_VF : MLX5_COREDEV_PF;
dev->priv.adev_idx = mlx5_adev_idx_alloc();
- if (dev->priv.adev_idx < 0)
- return dev->priv.adev_idx;
+ if (dev->priv.adev_idx < 0) {
+ err = dev->priv.adev_idx;
+ goto adev_init_err;
+ }
err = mlx5_mdev_init(dev, prof_sel);
if (err)
@@ -1403,6 +1405,7 @@ pci_init_err:
mlx5_mdev_uninit(dev);
mdev_init_err:
mlx5_adev_idx_free(dev->priv.adev_idx);
+adev_init_err:
mlx5_devlink_free(devlink);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 0fc7de4aa572..8e0dddc6383f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -116,7 +116,7 @@ free:
static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
{
mlx5_core_roce_gid_set(dev, 0, 0, 0,
- NULL, NULL, false, 0, 0);
+ NULL, NULL, false, 0, 1);
}
static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 8fa286ccdd6b..bf85ce9835d7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -19,7 +19,7 @@
#define MLXSW_THERMAL_ASIC_TEMP_NORM 75000 /* 75C */
#define MLXSW_THERMAL_ASIC_TEMP_HIGH 85000 /* 85C */
#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */
-#define MLXSW_THERMAL_ASIC_TEMP_CRIT 110000 /* 110C */
+#define MLXSW_THERMAL_ASIC_TEMP_CRIT 140000 /* 140C */
#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
#define MLXSW_THERMAL_ZONE_MAX_NAME 16
@@ -176,6 +176,12 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
if (err)
return err;
+ if (crit_temp > emerg_temp) {
+ dev_warn(dev, "%s : Critical threshold %d is above emergency threshold %d\n",
+ tz->tzdev->type, crit_temp, emerg_temp);
+ return 0;
+ }
+
/* According to the system thermal requirements, the thermal zones are
* defined with four trip points. The critical and emergency
* temperature thresholds, provided by QSFP module are set as "active"
@@ -190,11 +196,8 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp;
- if (emerg_temp > crit_temp)
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
MLXSW_THERMAL_MODULE_TEMP_SHIFT;
- else
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp;
return 0;
}
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 0b9992bd6626..ff87a0bc089c 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -60,14 +60,27 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
const unsigned char mac[ETH_ALEN],
unsigned int vid, enum macaccess_entry_type type)
{
+ u32 cmd = ANA_TABLES_MACACCESS_VALID |
+ ANA_TABLES_MACACCESS_DEST_IDX(port) |
+ ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
+ unsigned int mc_ports;
+
+ /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
+ if (type == ENTRYTYPE_MACv4)
+ mc_ports = (mac[1] << 8) | mac[2];
+ else if (type == ENTRYTYPE_MACv6)
+ mc_ports = (mac[0] << 8) | mac[1];
+ else
+ mc_ports = 0;
+
+ if (mc_ports & BIT(ocelot->num_phys_ports))
+ cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
+
ocelot_mact_select(ocelot, mac, vid);
/* Issue a write command */
- ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
- ANA_TABLES_MACACCESS_DEST_IDX(port) |
- ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
- ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN),
- ANA_TABLES_MACACCESS);
+ ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
return ocelot_mact_wait_for_completion(ocelot);
}
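The hunk above sets MAC_CPU_COPY when the port mask carried inside a MACv4/MACv6 multicast entry's MAC bytes includes the bit for the CPU port (index ocelot->num_phys_ports). A hedged standalone sketch of that extraction, with invented values and separate from the patch:

/* Illustrative sketch (not part of the patch): pulling the port mask that a
 * MACv4 multicast entry carries in mac[1]/mac[2] and testing the CPU-port
 * bit. num_phys_ports and the MAC bytes are invented for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x00, 0x41, 0x00, 0x01, 0x02 };
	unsigned int num_phys_ports = 6;                  /* assumed CPU port index */
	unsigned int mc_ports = (mac[1] << 8) | mac[2];   /* MACv4 encoding from the hunk */

	if (mc_ports & (1u << num_phys_ports))
		printf("CPU port in mask 0x%04x -> set MAC_CPU_COPY\n", mc_ports);
	else
		printf("CPU port not in mask 0x%04x\n", mc_ports);
	return 0;
}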
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 2bd2840d88bd..42230f92ca9c 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -1042,10 +1042,8 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
int ret = 0;
- if (!ocelot_netdevice_dev_check(dev))
- return 0;
-
if (event == NETDEV_PRECHANGEUPPER &&
+ ocelot_netdevice_dev_check(dev) &&
netif_is_lag_master(info->upper_dev)) {
struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
struct netlink_ext_ack *extack;
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 776b7d264dc3..2289e1fe3741 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -506,10 +506,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err)
- goto out;
+ goto undo_probe;
return 0;
+undo_probe:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(dev);
@@ -584,12 +588,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board)
err = register_netdev(ndev);
if (err)
- goto out;
+ goto undo_probe;
nubus_set_drvdata(board, ndev);
return 0;
+undo_probe:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(ndev);
return err;
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index afa166ff7aef..28d9e98db81a 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -229,11 +229,14 @@ int xtsonic_probe(struct platform_device *pdev)
sonic_msg_init(dev);
if ((err = register_netdev(dev)))
- goto out1;
+ goto undo_probe1;
return 0;
-out1:
+undo_probe1:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
release_region(dev->base_addr, SONIC_MEM_SIZE);
out:
free_netdev(dev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 9156c9825a16..ac4cd5d82e69 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -337,7 +337,7 @@ void ionic_rx_fill(struct ionic_queue *q)
unsigned int i, j;
unsigned int len;
- len = netdev->mtu + ETH_HLEN;
+ len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
for (i = ionic_q_space_avail(q); i; i--) {
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 4366c7a8de95..6b5ddb07ee83 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -78,6 +78,7 @@ config QED
depends on PCI
select ZLIB_INFLATE
select CRC8
+ select CRC32
select NET_DEVLINK
help
This enables the support for Marvell FastLinQ adapters family.
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index f21847739ef1..d258e0ccf946 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -564,11 +564,6 @@ static const struct net_device_ops netxen_netdev_ops = {
.ndo_set_features = netxen_set_features,
};
-static inline bool netxen_function_zero(struct pci_dev *pdev)
-{
- return (PCI_FUNC(pdev->devfn) == 0) ? true : false;
-}
-
static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter,
u32 mode)
{
@@ -664,7 +659,7 @@ static int netxen_setup_intr(struct netxen_adapter *adapter)
netxen_initialize_interrupt_registers(adapter);
netxen_set_msix_bit(pdev, 0);
- if (netxen_function_zero(pdev)) {
+ if (adapter->portnum == 0) {
if (!netxen_setup_msi_interrupts(adapter, num_msix))
netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
else
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index a2494bf85007..ca0ee29a57b5 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1799,6 +1799,11 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
ntohs(udp_hdr(skb)->dest) != gnv_port))
return features & ~(NETIF_F_CSUM_MASK |
NETIF_F_GSO_MASK);
+ } else if (l4_proto == IPPROTO_IPIP) {
+ /* IPIP tunnels are unknown to the device or at least unsupported natively,
+ * offloads for them can't be done trivially, so disable them for such skbs.
+ */
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 46d8510b2fe2..a569abe7f5ef 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2207,7 +2207,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
}
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
case RTL_GIGA_MAC_VER_43:
@@ -2233,7 +2234,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
case RTL_GIGA_MAC_VER_43:
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c63304632935..590b088bc4c7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2606,10 +2606,10 @@ static int sh_eth_close(struct net_device *ndev)
/* Free all the skbuffs in the Rx queue and the DMA buffer. */
sh_eth_ring_free(ndev);
- pm_runtime_put_sync(&mdp->pdev->dev);
-
mdp->is_opened = 0;
+ pm_runtime_put(&mdp->pdev->dev);
+
return 0;
}
@@ -3034,6 +3034,28 @@ static int sh_mdio_release(struct sh_eth_private *mdp)
return 0;
}
+static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_read(bus, phy, reg);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
+static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_write(bus, phy, reg, val);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
/* MDIO bus init function */
static int sh_mdio_init(struct sh_eth_private *mdp,
struct sh_eth_plat_data *pd)
@@ -3058,6 +3080,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
if (!mdp->mii_bus)
return -ENOMEM;
+ /* Wrap accessors with Runtime PM-aware ops */
+ mdp->mii_bus->read = sh_mdiobb_read;
+ mdp->mii_bus->write = sh_mdiobb_write;
+
/* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = dev;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index a2e80c89de2d..9a6a519426a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -721,6 +721,8 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID 0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID 0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID 0x4bb2
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID 0x43ac
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID 0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac
static const struct pci_device_id intel_eth_pci_id_table[] = {
@@ -735,6 +737,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 459ae715b33d..f184b00f5116 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -135,7 +135,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
struct device *dev = dwmac->dev;
static const struct clk_parent_data mux_parents[] = {
{ .fw_name = "clkin0", },
- { .fw_name = "clkin1", },
+ { .index = -1, },
};
static const struct clk_div_table div_table[] = {
{ .div = 2, .val = 2, },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 58e0511badba..a5e0eff4a387 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -64,6 +64,7 @@ struct emac_variant {
* @variant: reference to the current board variant
* @regmap: regmap for using the syscon
 * @internal_phy_powered: Is the internal PHY powered
+ * @use_internal_phy: Is the internal PHY selected for use
* @mux_handle: Internal pointer used by mdio-mux lib
*/
struct sunxi_priv_data {
@@ -74,6 +75,7 @@ struct sunxi_priv_data {
const struct emac_variant *variant;
struct regmap_field *regmap_field;
bool internal_phy_powered;
+ bool use_internal_phy;
void *mux_handle;
};
@@ -539,8 +541,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
.dma_interrupt = sun8i_dwmac_dma_interrupt,
};
+static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
+
static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
{
+ struct net_device *ndev = platform_get_drvdata(pdev);
struct sunxi_priv_data *gmac = priv;
int ret;
@@ -554,13 +559,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
ret = clk_prepare_enable(gmac->tx_clk);
if (ret) {
- if (gmac->regulator)
- regulator_disable(gmac->regulator);
dev_err(&pdev->dev, "Could not enable AHB clock\n");
- return ret;
+ goto err_disable_regulator;
+ }
+
+ if (gmac->use_internal_phy) {
+ ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
+ if (ret)
+ goto err_disable_clk;
}
return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(gmac->tx_clk);
+err_disable_regulator:
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+
+ return ret;
}
static void sun8i_dwmac_core_init(struct mac_device_info *hw,
@@ -831,7 +848,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
u32 reg, val;
int ret = 0;
- bool need_power_ephy = false;
if (current_child ^ desired_child) {
regmap_field_read(gmac->regmap_field, &reg);
@@ -839,13 +855,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
dev_info(priv->device, "Switch mux to internal PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT;
-
- need_power_ephy = true;
+ gmac->use_internal_phy = true;
break;
case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID:
dev_info(priv->device, "Switch mux to external PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN;
- need_power_ephy = false;
+ gmac->use_internal_phy = false;
break;
default:
dev_err(priv->device, "Invalid child ID %x\n",
@@ -853,7 +868,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
return -EINVAL;
}
regmap_field_write(gmac->regmap_field, val);
- if (need_power_ephy) {
+ if (gmac->use_internal_phy) {
ret = sun8i_dwmac_power_internal_phy(priv);
if (ret)
return ret;
@@ -883,22 +898,23 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
return ret;
}
-static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+static int sun8i_dwmac_set_syscon(struct device *dev,
+ struct plat_stmmacenet_data *plat)
{
- struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
- struct device_node *node = priv->device->of_node;
+ struct sunxi_priv_data *gmac = plat->bsp_priv;
+ struct device_node *node = dev->of_node;
int ret;
u32 reg, val;
ret = regmap_field_read(gmac->regmap_field, &val);
if (ret) {
- dev_err(priv->device, "Fail to read from regmap field.\n");
+ dev_err(dev, "Fail to read from regmap field.\n");
return ret;
}
reg = gmac->variant->default_syscon_value;
if (reg != val)
- dev_warn(priv->device,
+ dev_warn(dev,
"Current syscon value is not the default %x (expect %x)\n",
val, reg);
@@ -911,9 +927,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
/* Force EPHY xtal frequency to 24MHz. */
reg |= H3_EPHY_CLK_SEL;
- ret = of_mdio_parse_addr(priv->device, priv->plat->phy_node);
+ ret = of_mdio_parse_addr(dev, plat->phy_node);
if (ret < 0) {
- dev_err(priv->device, "Could not parse MDIO addr\n");
+ dev_err(dev, "Could not parse MDIO addr\n");
return ret;
}
/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
@@ -929,17 +945,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
if (val % 100) {
- dev_err(priv->device, "tx-delay must be a multiple of 100\n");
+ dev_err(dev, "tx-delay must be a multiple of 100\n");
return -EINVAL;
}
val /= 100;
- dev_dbg(priv->device, "set tx-delay to %x\n", val);
+ dev_dbg(dev, "set tx-delay to %x\n", val);
if (val <= gmac->variant->tx_delay_max) {
reg &= ~(gmac->variant->tx_delay_max <<
SYSCON_ETXDC_SHIFT);
reg |= (val << SYSCON_ETXDC_SHIFT);
} else {
- dev_err(priv->device, "Invalid TX clock delay: %d\n",
+ dev_err(dev, "Invalid TX clock delay: %d\n",
val);
return -EINVAL;
}
@@ -947,17 +963,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
if (val % 100) {
- dev_err(priv->device, "rx-delay must be a multiple of 100\n");
+ dev_err(dev, "rx-delay must be a multiple of 100\n");
return -EINVAL;
}
val /= 100;
- dev_dbg(priv->device, "set rx-delay to %x\n", val);
+ dev_dbg(dev, "set rx-delay to %x\n", val);
if (val <= gmac->variant->rx_delay_max) {
reg &= ~(gmac->variant->rx_delay_max <<
SYSCON_ERXDC_SHIFT);
reg |= (val << SYSCON_ERXDC_SHIFT);
} else {
- dev_err(priv->device, "Invalid RX clock delay: %d\n",
+ dev_err(dev, "Invalid RX clock delay: %d\n",
val);
return -EINVAL;
}
@@ -968,7 +984,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (gmac->variant->support_rmii)
reg &= ~SYSCON_RMII_EN;
- switch (priv->plat->interface) {
+ switch (plat->interface) {
case PHY_INTERFACE_MODE_MII:
/* default */
break;
@@ -982,8 +998,8 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
break;
default:
- dev_err(priv->device, "Unsupported interface mode: %s",
- phy_modes(priv->plat->interface));
+ dev_err(dev, "Unsupported interface mode: %s",
+ phy_modes(plat->interface));
return -EINVAL;
}
@@ -1004,17 +1020,10 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
struct sunxi_priv_data *gmac = priv;
if (gmac->variant->soc_has_internal_phy) {
- /* sun8i_dwmac_exit could be called with mdiomux uninit */
- if (gmac->mux_handle)
- mdio_mux_uninit(gmac->mux_handle);
if (gmac->internal_phy_powered)
sun8i_dwmac_unpower_internal_phy(gmac);
}
- sun8i_dwmac_unset_syscon(gmac);
-
- reset_control_put(gmac->rst_ephy);
-
clk_disable_unprepare(gmac->tx_clk);
if (gmac->regulator)
@@ -1049,16 +1058,11 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
{
struct mac_device_info *mac;
struct stmmac_priv *priv = ppriv;
- int ret;
mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
if (!mac)
return NULL;
- ret = sun8i_dwmac_set_syscon(priv);
- if (ret)
- return NULL;
-
mac->pcsr = priv->ioaddr;
mac->mac = &sun8i_dwmac_ops;
mac->dma = &sun8i_dwmac_dma_ops;
@@ -1134,10 +1138,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
- if (IS_ERR(plat_dat))
- return PTR_ERR(plat_dat);
-
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
if (!gmac)
return -ENOMEM;
@@ -1201,11 +1201,15 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
ret = of_get_phy_mode(dev->of_node, &interface);
if (ret)
return -EINVAL;
- plat_dat->interface = interface;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
+ plat_dat->interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
plat_dat->has_sun8i = true;
@@ -1214,9 +1218,13 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
plat_dat->exit = sun8i_dwmac_exit;
plat_dat->setup = sun8i_dwmac_setup;
+ ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
+ if (ret)
+ goto dwmac_deconfig;
+
ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
if (ret)
- return ret;
+ goto dwmac_syscon;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -1230,7 +1238,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (gmac->variant->soc_has_internal_phy) {
ret = get_ephy_nodes(priv);
if (ret)
- goto dwmac_exit;
+ goto dwmac_remove;
ret = sun8i_dwmac_register_mdio_mux(priv);
if (ret) {
dev_err(&pdev->dev, "Failed to register mux\n");
@@ -1239,15 +1247,42 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
} else {
ret = sun8i_dwmac_reset(priv);
if (ret)
- goto dwmac_exit;
+ goto dwmac_remove;
}
return ret;
dwmac_mux:
- sun8i_dwmac_unset_syscon(gmac);
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
+dwmac_remove:
+ stmmac_dvr_remove(&pdev->dev);
dwmac_exit:
+ sun8i_dwmac_exit(pdev, gmac);
+dwmac_syscon:
+ sun8i_dwmac_unset_syscon(gmac);
+dwmac_deconfig:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int sun8i_dwmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+ if (gmac->variant->soc_has_internal_phy) {
+ mdio_mux_uninit(gmac->mux_handle);
+ sun8i_dwmac_unpower_internal_phy(gmac);
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
+ }
+
stmmac_pltfr_remove(pdev);
-return ret;
+ sun8i_dwmac_unset_syscon(gmac);
+
+ return 0;
}
static const struct of_device_id sun8i_dwmac_match[] = {
@@ -1269,7 +1304,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
static struct platform_driver sun8i_dwmac_driver = {
.probe = sun8i_dwmac_probe,
- .remove = stmmac_pltfr_remove,
+ .remove = sun8i_dwmac_remove,
.driver = {
.name = "dwmac-sun8i",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 03e79a677c8b..8f7ac24545ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -568,68 +568,24 @@ static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
unsigned int ptp_rate)
{
- u32 speed, total_offset, offset, ctrl, ctr_low;
- u32 extcfg = readl(ioaddr + GMAC_EXT_CONFIG);
- u32 mac_cfg = readl(ioaddr + GMAC_CONFIG);
int i, ret = 0x0;
- u64 total_ctr;
-
- if (extcfg & GMAC_CONFIG_EIPG_EN) {
- offset = (extcfg & GMAC_CONFIG_EIPG) >> GMAC_CONFIG_EIPG_SHIFT;
- offset = 104 + (offset * 8);
- } else {
- offset = (mac_cfg & GMAC_CONFIG_IPG) >> GMAC_CONFIG_IPG_SHIFT;
- offset = 96 - (offset * 8);
- }
-
- speed = mac_cfg & (GMAC_CONFIG_PS | GMAC_CONFIG_FES);
- speed = speed >> GMAC_CONFIG_FES_SHIFT;
-
- switch (speed) {
- case 0x0:
- offset = offset * 1000; /* 1G */
- break;
- case 0x1:
- offset = offset * 400; /* 2.5G */
- break;
- case 0x2:
- offset = offset * 100000; /* 10M */
- break;
- case 0x3:
- offset = offset * 10000; /* 100M */
- break;
- default:
- return -EINVAL;
- }
-
- offset = offset / 1000;
+ u32 ctrl;
ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false);
+ ret |= dwmac5_est_write(ioaddr, CTR_LOW, cfg->ctr[0], false);
+ ret |= dwmac5_est_write(ioaddr, CTR_HIGH, cfg->ctr[1], false);
if (ret)
return ret;
- total_offset = 0;
for (i = 0; i < cfg->gcl_size; i++) {
- ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i] + offset, true);
+ ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i], true);
if (ret)
return ret;
-
- total_offset += offset;
}
- total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL;
- total_ctr += total_offset;
-
- ctr_low = do_div(total_ctr, 1000000000);
-
- ret |= dwmac5_est_write(ioaddr, CTR_LOW, ctr_low, false);
- ret |= dwmac5_est_write(ioaddr, CTR_HIGH, total_ctr, false);
- if (ret)
- return ret;
-
ctrl = readl(ioaddr + MTL_EST_CONTROL);
ctrl &= ~PTOV;
ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5b1c12ff98c0..26b971cd4da5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2184,7 +2184,7 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
spin_lock_irqsave(&ch->lock, flags);
stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
spin_unlock_irqrestore(&ch->lock, flags);
- __napi_schedule_irqoff(&ch->rx_napi);
+ __napi_schedule(&ch->rx_napi);
}
}
@@ -2193,7 +2193,7 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
spin_lock_irqsave(&ch->lock, flags);
stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
spin_unlock_irqrestore(&ch->lock, flags);
- __napi_schedule_irqoff(&ch->tx_napi);
+ __napi_schedule(&ch->tx_napi);
}
}
@@ -4026,6 +4026,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct stmmac_priv *priv = netdev_priv(dev);
int txfifosz = priv->plat->tx_fifo_size;
+ const int mtu = new_mtu;
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
@@ -4043,7 +4044,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
return -EINVAL;
- dev->mtu = new_mtu;
+ dev->mtu = mtu;
netdev_update_features(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index f5bed4d26e80..8ed3b2c834a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -599,7 +599,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
struct plat_stmmacenet_data *plat = priv->plat;
- struct timespec64 time;
+ struct timespec64 time, current_time;
+ ktime_t current_time_ns;
bool fpe = false;
int i, ret = 0;
u64 ctr;
@@ -694,7 +695,22 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
}
/* Adjust for real system time */
- time = ktime_to_timespec64(qopt->base_time);
+ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+ current_time_ns = timespec64_to_ktime(current_time);
+ if (ktime_after(qopt->base_time, current_time_ns)) {
+ time = ktime_to_timespec64(qopt->base_time);
+ } else {
+ ktime_t base_time;
+ s64 n;
+
+ n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time),
+ qopt->cycle_time);
+ base_time = ktime_add_ns(qopt->base_time,
+ (n + 1) * qopt->cycle_time);
+
+ time = ktime_to_timespec64(base_time);
+ }
+
priv->plat->est->btr[0] = (u32)time.tv_nsec;
priv->plat->est->btr[1] = (u32)time.tv_sec;
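The hunk above keeps the programmed base time in the future: when qopt->base_time has already passed, it is advanced by (n + 1) whole cycle_time periods past the original base time, so the schedule starts on the next cycle boundary after the current PTP time. A standalone sketch of that arithmetic with invented numbers, not part of the patch:

/* Illustrative sketch (not part of the patch): advancing a taprio base time
 * that lies in the past to the next cycle boundary after "now", mirroring
 * the (n + 1) * cycle_time adjustment above. Plain C with stand-in values.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t base_time  = 1000;   /* ns, already in the past */
	int64_t cycle_time = 300;    /* ns per gate-control cycle */
	int64_t now        = 2050;   /* ns, current PTP time */

	if (now > base_time) {
		int64_t n = (now - base_time) / cycle_time;   /* 3 full cycles elapsed */
		base_time += (n + 1) * cycle_time;            /* 1000 + 4 * 300 = 2200 */
	}

	printf("effective base time: %lld ns\n", (long long)base_time);
	return 0;
}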
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index d1fc7955d422..43222a34cba0 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -599,6 +599,7 @@ void cpts_unregister(struct cpts *cpts)
ptp_clock_unregister(cpts->clock);
cpts->clock = NULL;
+ cpts->phc_index = -1;
cpts_write32(cpts, 0, int_enable);
cpts_write32(cpts, 0, control);
@@ -784,6 +785,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->cc.read = cpts_systim_read;
cpts->cc.mask = CLOCKSOURCE_MASK(32);
cpts->info = cpts_info;
+ cpts->phc_index = -1;
if (n_ext_ts)
cpts->info.n_ext_ts = n_ext_ts;
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index c4795249719d..14d9a791924b 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -326,8 +326,8 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
}
/* Issue an event ring command and wait for it to complete */
-static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
- enum gsi_evt_cmd_opcode opcode)
+static void evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
+ enum gsi_evt_cmd_opcode opcode)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct completion *completion = &evt_ring->completion;
@@ -340,7 +340,13 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
* is issued here. Only permit *this* event ring to trigger
* an interrupt, and only enable the event control IRQ type
* when we expect it to occur.
+ *
+ * There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
*/
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+
val = BIT(evt_ring_id);
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
gsi_irq_type_enable(gsi, GSI_EV_CTRL);
@@ -355,19 +361,16 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
if (success)
- return 0;
+ return;
dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
opcode, evt_ring_id, evt_ring->state);
-
- return -ETIMEDOUT;
}
/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- int ret;
/* Get initial event ring state */
evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
@@ -377,14 +380,16 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
return -EINVAL;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
- dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
- evt_ring_id, evt_ring->state);
- ret = -EIO;
- }
+ evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
- return ret;
+ /* If successful the event ring state will have changed */
+ if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+ return 0;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
+ evt_ring_id, evt_ring->state);
+
+ return -EIO;
}
/* Reset a GSI event ring in ALLOCATED or ERROR state. */
@@ -392,7 +397,6 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
enum gsi_evt_ring_state state = evt_ring->state;
- int ret;
if (state != GSI_EVT_RING_STATE_ALLOCATED &&
state != GSI_EVT_RING_STATE_ERROR) {
@@ -401,17 +405,20 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
return;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
- dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
+
+ /* If successful the event ring state will have changed */
+ if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+ return;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
+ evt_ring_id, evt_ring->state);
}
/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- int ret;
if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
@@ -419,10 +426,14 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
return;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
- dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
+
+ /* If successful the event ring state will have changed */
+ if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ return;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
+ evt_ring_id, evt_ring->state);
}
/* Fetch the current state of a channel from hardware */
@@ -438,7 +449,7 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
}
/* Issue a channel command and wait for it to complete */
-static int
+static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
struct completion *completion = &channel->completion;
@@ -453,7 +464,13 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
* issued here. So we only permit *this* channel to trigger
* an interrupt and only enable the channel control IRQ type
* when we expect it to occur.
+ *
+ * There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
*/
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+
val = BIT(channel_id);
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
gsi_irq_type_enable(gsi, GSI_CH_CTRL);
@@ -467,12 +484,10 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
if (success)
- return 0;
+ return;
dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
opcode, channel_id, gsi_channel_state(channel));
-
- return -ETIMEDOUT;
}
/* Allocate GSI channel in NOT_ALLOCATED state */
@@ -481,7 +496,6 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
struct device *dev = gsi->dev;
enum gsi_channel_state state;
- int ret;
/* Get initial channel state */
state = gsi_channel_state(channel);
@@ -491,17 +505,17 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
+ gsi_channel_command(channel, GSI_CH_ALLOCATE);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(dev, "channel %u bad state %u after alloc\n",
- channel_id, state);
- ret = -EIO;
- }
+ if (state == GSI_CHANNEL_STATE_ALLOCATED)
+ return 0;
- return ret;
+ dev_err(dev, "channel %u bad state %u after alloc\n",
+ channel_id, state);
+
+ return -EIO;
}
/* Start an ALLOCATED channel */
@@ -509,7 +523,6 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED &&
@@ -519,17 +532,17 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_START);
+ gsi_channel_command(channel, GSI_CH_START);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
- dev_err(dev, "channel %u bad state %u after start\n",
- gsi_channel_id(channel), state);
- ret = -EIO;
- }
+ if (state == GSI_CHANNEL_STATE_STARTED)
+ return 0;
- return ret;
+ dev_err(dev, "channel %u bad state %u after start\n",
+ gsi_channel_id(channel), state);
+
+ return -EIO;
}
/* Stop a GSI channel in STARTED state */
@@ -537,7 +550,6 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
@@ -554,12 +566,12 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_STOP);
+ gsi_channel_command(channel, GSI_CH_STOP);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (ret || state == GSI_CHANNEL_STATE_STOPPED)
- return ret;
+ if (state == GSI_CHANNEL_STATE_STOPPED)
+ return 0;
/* We may have to try again if stop is in progress */
if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
@@ -576,7 +588,6 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
msleep(1); /* A short delay is required before a RESET command */
@@ -590,11 +601,11 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
return;
}
- ret = gsi_channel_command(channel, GSI_CH_RESET);
+ gsi_channel_command(channel, GSI_CH_RESET);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
+ if (state != GSI_CHANNEL_STATE_ALLOCATED)
dev_err(dev, "channel %u bad state %u after reset\n",
gsi_channel_id(channel), state);
}
@@ -605,7 +616,6 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
struct device *dev = gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED) {
@@ -614,11 +624,12 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
return;
}
- ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);
+ gsi_channel_command(channel, GSI_CH_DE_ALLOC);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+
+ if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
dev_err(dev, "channel %u bad state %u after dealloc\n",
channel_id, state);
}
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 9dcf16f399b7..135c393437f1 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -115,13 +115,13 @@ static int ipa_interconnect_enable(struct ipa *ipa)
return ret;
data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
+ ret = icc_set_bw(clock->imem_path, data->average_rate,
data->peak_rate);
if (ret)
goto err_memory_path_disable;
data = &clock->interconnect_data[IPA_INTERCONNECT_CONFIG];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
+ ret = icc_set_bw(clock->config_path, data->average_rate,
data->peak_rate);
if (ret)
goto err_imem_path_disable;
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index e34fe2d77324..9b08eb823984 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -216,6 +216,7 @@ int ipa_modem_start(struct ipa *ipa)
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
priv = netdev_priv(netdev);
priv->ipa = ipa;
diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c
index 5136275c8e73..d3915f831854 100644
--- a/drivers/net/mdio/mdio-bitbang.c
+++ b/drivers/net/mdio/mdio-bitbang.c
@@ -149,7 +149,7 @@ static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
return dev_addr;
}
-static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
+int mdiobb_read(struct mii_bus *bus, int phy, int reg)
{
struct mdiobb_ctrl *ctrl = bus->priv;
int ret, i;
@@ -180,8 +180,9 @@ static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
mdiobb_get_bit(ctrl);
return ret;
}
+EXPORT_SYMBOL(mdiobb_read);
-static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
struct mdiobb_ctrl *ctrl = bus->priv;
@@ -201,6 +202,7 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
mdiobb_get_bit(ctrl);
return 0;
}
+EXPORT_SYMBOL(mdiobb_write);
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
{
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 33372756a451..ddb78fb4d6dc 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -317,7 +317,8 @@ static int smsc_phy_probe(struct phy_device *phydev)
/* Make clk optional to keep DTB backward compatibility. */
priv->refclk = clk_get_optional(dev, NULL);
if (IS_ERR(priv->refclk))
- dev_err_probe(dev, PTR_ERR(priv->refclk), "Failed to request clock\n");
+ return dev_err_probe(dev, PTR_ERR(priv->refclk),
+ "Failed to request clock\n");
ret = clk_prepare_enable(priv->refclk);
if (ret)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 09c27f7773f9..d445ecb1d0c7 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -623,6 +623,7 @@ static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
write_unlock_bh(&pch->upl);
return -EALREADY;
}
+ refcount_inc(&pchb->file.refcnt);
rcu_assign_pointer(pch->bridge, pchb);
write_unlock_bh(&pch->upl);
@@ -632,19 +633,24 @@ static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
write_unlock_bh(&pchb->upl);
goto err_unset;
}
+ refcount_inc(&pch->file.refcnt);
rcu_assign_pointer(pchb->bridge, pch);
write_unlock_bh(&pchb->upl);
- refcount_inc(&pch->file.refcnt);
- refcount_inc(&pchb->file.refcnt);
-
return 0;
err_unset:
write_lock_bh(&pch->upl);
+ /* Re-read pch->bridge with upl held in case it was modified concurrently */
+ pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
RCU_INIT_POINTER(pch->bridge, NULL);
write_unlock_bh(&pch->upl);
synchronize_rcu();
+
+ if (pchb)
+ if (refcount_dec_and_test(&pchb->file.refcnt))
+ ppp_destroy_channel(pchb);
+
return -EALREADY;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fbed05ae7b0f..978ac0981d16 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1365,7 +1365,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
int i;
if (it->nr_segs > MAX_SKB_FRAGS + 1)
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(-EMSGSIZE);
local_bh_disable();
skb = napi_get_frags(&tfile->napi);
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 1e3719028780..fbbe78643631 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -631,7 +631,6 @@ config USB_NET_AQC111
config USB_RTL8153_ECM
tristate "RTL8153 ECM support"
depends on USB_NET_CDCETHER && (USB_RTL8152 || USB_RTL8152=n)
- default y
help
This option supports ECM mode for RTL8153 ethernet adapter, when
CONFIG_USB_RTL8152 is not set, or the RTL8153 device is not
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 8c1d61c2cbac..6aaa0675c28a 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -793,6 +793,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x721e, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 2bac57d5e8d5..291e76d32abe 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1199,7 +1199,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* accordingly. Otherwise, we should check here.
*/
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
- delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+ delayed_ndp_size = ctx->max_ndp_size +
+ max_t(u32,
+ ctx->tx_ndp_modulus,
+ ctx->tx_modulus + ctx->tx_remainder) - 1;
else
delayed_ndp_size = 0;
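The hunk above enlarges the space reserved for a delayed NDP from a simple round-up of max_ndp_size to max_ndp_size plus the worst-case alignment slack, max(tx_ndp_modulus, tx_modulus + tx_remainder) - 1. A standalone sketch comparing the two reservations with invented sizes, not part of the patch:

/* Illustrative sketch (not part of the patch): old vs. new NDP reservation.
 * All sizes are invented; only the arithmetic mirrors the hunk.
 */
#include <stdio.h>

static unsigned int align_up(unsigned int x, unsigned int a)
{
	return (x + a - 1) / a * a;
}

int main(void)
{
	unsigned int max_ndp_size   = 32;
	unsigned int tx_ndp_modulus = 4;
	unsigned int tx_modulus     = 64;
	unsigned int tx_remainder   = 12;

	unsigned int old_reserve = align_up(max_ndp_size, tx_ndp_modulus);
	unsigned int pad = tx_ndp_modulus > tx_modulus + tx_remainder ?
			   tx_ndp_modulus : tx_modulus + tx_remainder;
	unsigned int new_reserve = max_ndp_size + pad - 1;

	printf("old reserve: %u bytes, new reserve: %u bytes\n",
	       old_reserve, new_reserve);
	return 0;
}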
@@ -1410,7 +1413,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
skb_out->len > ctx->min_tx_pkt) {
padding_count = ctx->tx_curr_size - skb_out->len;
- skb_put_zero(skb_out, padding_count);
+ if (!WARN_ON(padding_count > ctx->tx_curr_size))
+ skb_put_zero(skb_out, padding_count);
} else if (skb_out->len < ctx->tx_curr_size &&
(skb_out->len % dev->maxpacket) == 0) {
skb_put_u8(skb_out, 0); /* force short packet */
@@ -1823,6 +1827,15 @@ cdc_ncm_speed_change(struct usbnet *dev,
uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
+ /* If the speed hasn't changed, don't report it.
+ * RTL8156 shipped before 2021 sends a notification about every 32ms.
+ */
+ if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
+ return;
+
+ dev->rx_speed = rx_speed;
+ dev->tx_speed = tx_speed;
+
/*
* Currently the USB-NET API does not support reporting the actual
* device speed. Do print it instead.
@@ -1863,10 +1876,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
- netif_info(dev, link, dev->net,
- "network connection: %sconnected\n",
- !!event->wValue ? "" : "dis");
- usbnet_link_change(dev, !!event->wValue, 0);
+ if (netif_carrier_ok(dev->net) != !!event->wValue)
+ usbnet_link_change(dev, !!event->wValue, 0);
break;
case USB_CDC_NOTIFY_SPEED_CHANGE:
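
For the NDP-at-the-end case above, the tail reservation changes from the NDP size rounded up to its alignment into the NDP size plus the worst-case padding implied by either alignment parameter. The following helper is illustrative only, not driver code, and just restates that arithmetic; with made-up sample values max_ndp_size = 32, tx_ndp_modulus = 4, tx_modulus = 512, tx_remainder = 0 it reserves 32 + 511 = 543 bytes.

#include <linux/kernel.h>	/* max_t() */
#include <linux/types.h>

static u32 delayed_ndp_reservation(u32 max_ndp_size, u32 tx_ndp_modulus,
				   u32 tx_modulus, u32 tx_remainder)
{
	/* NDP bytes plus the largest padding either alignment rule may need */
	return max_ndp_size +
	       max_t(u32, tx_ndp_modulus, tx_modulus + tx_remainder) - 1;
}
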
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d166c321ee9b..af19513a9f75 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1013,6 +1013,7 @@ static const struct usb_device_id products[] = {
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
/* 3. Combined interface devices matching on interface number */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index c448d6089821..67cd6986634f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -6877,6 +6877,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)},
{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
diff --git a/drivers/net/usb/r8153_ecm.c b/drivers/net/usb/r8153_ecm.c
index 2c3fabd38b16..20b2df8d74ae 100644
--- a/drivers/net/usb/r8153_ecm.c
+++ b/drivers/net/usb/r8153_ecm.c
@@ -122,12 +122,20 @@ static const struct driver_info r8153_info = {
};
static const struct usb_device_id products[] = {
+/* Realtek RTL8153 Based USB 3.0 Ethernet Adapters */
{
USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_REALTEK, 0x8153, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&r8153_info,
},
+/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_LENOVO, 0x721e, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&r8153_info,
+},
+
{ }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 6609d21ef894..f813ca9dec53 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -387,7 +387,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
reply_len = sizeof *phym;
retval = rndis_query(dev, intf, u.buf,
RNDIS_OID_GEN_PHYSICAL_MEDIUM,
- 0, (void **) &phym, &reply_len);
+ reply_len, (void **)&phym, &reply_len);
if (retval != 0 || !phym) {
/* OID is optional so don't fail here. */
phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4c41df624dbb..508408fbe78f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2093,14 +2093,16 @@ static int virtnet_set_channels(struct net_device *dev,
get_online_cpus();
err = _virtnet_set_queues(vi, queue_pairs);
- if (!err) {
- netif_set_real_num_tx_queues(dev, queue_pairs);
- netif_set_real_num_rx_queues(dev, queue_pairs);
-
- virtnet_set_affinity(vi);
+ if (err) {
+ put_online_cpus();
+ goto err;
}
+ virtnet_set_affinity(vi);
put_online_cpus();
+ netif_set_real_num_tx_queues(dev, queue_pairs);
+ netif_set_real_num_rx_queues(dev, queue_pairs);
+ err:
return err;
}
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 4029fde71a9e..83c9481995dd 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -282,6 +282,7 @@ config SLIC_DS26522
tristate "Slic Maxim ds26522 card support"
depends on SPI
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
+ select BITREVERSE
help
This module initializes and configures the slic maxim card
in T1 or E1 mode.
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 64f855651336..261b53fc8e04 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -569,6 +569,13 @@ static void ppp_timer(struct timer_list *t)
unsigned long flags;
spin_lock_irqsave(&ppp->lock, flags);
+ /* mod_timer could be called after we entered this function but
+ * before we got the lock.
+ */
+ if (timer_pending(&proto->timer)) {
+ spin_unlock_irqrestore(&ppp->lock, flags);
+ return;
+ }
switch (proto->state) {
case STOPPING:
case REQ_SENT:
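
The timer_pending() check added above is a common guard for handlers whose timer may be re-armed concurrently: mod_timer() can run after the timer fired but before the handler acquires the lock, in which case the pending expiry belongs to the new arming and this run should do nothing. A generic sketch of the idiom, with hypothetical struct and function names rather than the hdlc_ppp code:

#include <linux/timer.h>
#include <linux/spinlock.h>

struct proto_state {
	struct timer_list timer;
	spinlock_t lock;
};

static void proto_timer_fn(struct timer_list *t)
{
	struct proto_state *st = from_timer(st, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&st->lock, flags);
	if (timer_pending(&st->timer)) {	/* re-armed while we waited */
		spin_unlock_irqrestore(&st->lock, flags);
		return;
	}
	/* ... expiry work, done under the lock ... */
	spin_unlock_irqrestore(&st->lock, flags);
}
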
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index b97c38b9a270..350b7913622c 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -185,7 +185,7 @@ int ath11k_core_suspend(struct ath11k_base *ab)
ath11k_hif_ce_irq_disable(ab);
ret = ath11k_hif_suspend(ab);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to suspend hif: %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 205c0f1a40e9..920e5026a635 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -2294,6 +2294,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
{
u8 channel_num;
u32 center_freq;
+ struct ieee80211_channel *channel;
rx_status->freq = 0;
rx_status->rate_idx = 0;
@@ -2314,9 +2315,12 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
rx_status->band = NL80211_BAND_5GHZ;
} else {
spin_lock_bh(&ar->data_lock);
- rx_status->band = ar->rx_channel->band;
- channel_num =
- ieee80211_frequency_to_channel(ar->rx_channel->center_freq);
+ channel = ar->rx_channel;
+ if (channel) {
+ rx_status->band = channel->band;
+ channel_num =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ }
spin_unlock_bh(&ar->data_lock);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
rx_desc, sizeof(struct hal_rx_desc));
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 5c175e3e09b2..c1608f64ea95 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -3021,6 +3021,7 @@ static int ath11k_mac_station_add(struct ath11k *ar,
}
if (ab->hw_params.vdev_start_delay &&
+ !arvif->is_started &&
arvif->vdev_type != WMI_VDEV_TYPE_AP) {
ret = ath11k_start_vdev_delay(ar->hw, vif);
if (ret) {
@@ -5284,7 +5285,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
/* for QCA6390 bss peer must be created before vdev_start */
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ !ath11k_peer_find_by_vdev_id(ab, arvif->vdev_id)) {
memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
ret = 0;
goto out;
@@ -5295,7 +5297,9 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ab->hw_params.vdev_start_delay) {
+ if (ab->hw_params.vdev_start_delay &&
+ (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
param.vdev_id = arvif->vdev_id;
param.peer_type = WMI_PEER_TYPE_DEFAULT;
param.peer_addr = ar->mac_addr;
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 857647aa57c8..20b415cd96c4 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -274,7 +274,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG,
PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
return ret;
}
@@ -283,7 +283,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
return ret;
}
@@ -292,7 +292,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
return ret;
}
@@ -301,7 +301,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
return ret;
}
@@ -886,6 +886,32 @@ static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
pci_disable_device(pci_dev);
}
+static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ &ab_pci->link_ctl);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
+ ab_pci->link_ctl,
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+
+ /* disable L0s and L1 */
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
+
+ set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
+}
+
+static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
+{
+ if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl);
+}
+
static int ath11k_pci_power_up(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -895,6 +921,11 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
ath11k_pci_sw_reset(ab_pci->ab, true);
+ /* Disable ASPM during firmware download due to problems switching
+ * to AMSS state.
+ */
+ ath11k_pci_aspm_disable(ab_pci);
+
ret = ath11k_mhi_start(ab_pci);
if (ret) {
ath11k_err(ab, "failed to start mhi: %d\n", ret);
@@ -908,6 +939,9 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ /* restore ASPM in case firmware bootup fails */
+ ath11k_pci_aspm_restore(ab_pci);
+
ath11k_pci_force_wake(ab_pci->ab);
ath11k_mhi_stop(ab_pci);
clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
@@ -965,6 +999,8 @@ static int ath11k_pci_start(struct ath11k_base *ab)
set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ ath11k_pci_aspm_restore(ab_pci);
+
ath11k_pci_ce_irqs_enable(ab);
ath11k_ce_rx_post_buf(ab);
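
The ASPM handling added above has the usual save/disable/restore shape: capture Link Control once, clear the ASPM control bits for the duration of firmware download, and write the saved value back afterwards (or on the power-down error path). A generic sketch with a hypothetical caller-provided save slot; the pcie_capability_*() helpers and the PCI_EXP_LNKCTL* constants are real kernel APIs:

#include <linux/pci.h>

static void aspm_disable(struct pci_dev *pdev, u16 *saved)
{
	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, saved);
	/* clear the ASPM control field (both L0s and L1) */
	pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
				   *saved & ~PCI_EXP_LNKCTL_ASPMC);
}

static void aspm_restore(struct pci_dev *pdev, u16 saved)
{
	pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, saved);
}
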
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index 0432a702416b..fe44d0dfce19 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -63,6 +63,7 @@ struct ath11k_msi_config {
enum ath11k_pci_flags {
ATH11K_PCI_FLAG_INIT_DONE,
ATH11K_PCI_FLAG_IS_MSI_64,
+ ATH11K_PCI_ASPM_RESTORE,
};
struct ath11k_pci {
@@ -80,6 +81,7 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */
unsigned long flags;
+ u16 link_ctl;
};
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 1866d82678fa..b69e7ebfa930 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -76,6 +76,23 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
return NULL;
}
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (vdev_id == peer->vdev_id) {
+ spin_unlock_bh(&ab->base_lock);
+ return peer;
+ }
+ }
+ spin_unlock_bh(&ab->base_lock);
+ return NULL;
+}
+
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
{
struct ath11k_peer *peer;
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
index bba2e00b6944..8553ed061aea 100644
--- a/drivers/net/wireless/ath/ath11k/peer.h
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -43,5 +43,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
struct ieee80211_sta *sta, struct peer_create_params *param);
int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id);
#endif /* _PEER_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index f0b5c50974f3..0db623ff4bb9 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1660,6 +1660,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
struct qmi_txn txn = {};
int ret = 0, i;
+ bool delayed;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
@@ -1672,11 +1673,13 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
* failure to FW and FW will then request multiple blocks of small
* chunk size memory.
*/
- if (!ab->bus_params.fixed_mem_region && ab->qmi.mem_seg_count <= 2) {
+ if (!ab->bus_params.fixed_mem_region && ab->qmi.target_mem_delayed) {
+ delayed = true;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
memset(req, 0, sizeof(*req));
} else {
+ delayed = false;
req->mem_seg_len = ab->qmi.mem_seg_count;
for (i = 0; i < req->mem_seg_len ; i++) {
@@ -1708,6 +1711,12 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* the error response is expected when
+ * target_mem_delayed is true.
+ */
+ if (delayed && resp.resp.error == 0)
+ goto out;
+
ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
@@ -1742,6 +1751,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
int i;
struct target_mem_chunk *chunk;
+ ab->qmi.target_mem_delayed = false;
+
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
chunk = &ab->qmi.target_mem[i];
chunk->vaddr = dma_alloc_coherent(ab->dev,
@@ -1749,6 +1760,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
&chunk->paddr,
GFP_KERNEL);
if (!chunk->vaddr) {
+ if (ab->qmi.mem_seg_count <= 2) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "qmi dma allocation failed (%d B type %u), will try later with small size\n",
+ chunk->size,
+ chunk->type);
+ ath11k_qmi_free_target_mem_chunk(ab);
+ ab->qmi.target_mem_delayed = true;
+ return 0;
+ }
ath11k_err(ab, "failed to alloc memory, size: 0x%x, type: %u\n",
chunk->size,
chunk->type);
@@ -2517,7 +2537,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
ret);
return;
}
- } else if (msg->mem_seg_len > 2) {
+ } else {
ret = ath11k_qmi_alloc_target_mem_chunk(ab);
if (ret) {
ath11k_warn(ab, "qmi failed to alloc target memory: %d\n",
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 92925c9eac67..7bad374cc23a 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -125,6 +125,7 @@ struct ath11k_qmi {
struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
u32 mem_seg_count;
u32 target_mem_mode;
+ bool target_mem_delayed;
u8 cal_done;
struct target_info target;
struct m3_mem_region m3_mem;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index da4b546b62cb..73869d445c5b 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -3460,6 +3460,9 @@ int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index 6a95b199bf62..f074e9c31aa2 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -2,6 +2,7 @@
config WIL6210
tristate "Wilocity 60g WiFi card wil6210 support"
select WANT_DEV_COREDUMP
+ select CRC32
depends on CFG80211
depends on PCI
default n
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index ed4635bd151a..102a8f14c22d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -40,9 +40,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
.types = BIT(NL80211_IFTYPE_ADHOC)
}, {
.max = 16,
- .types = BIT(NL80211_IFTYPE_AP) |
+ .types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT)
+ | BIT(NL80211_IFTYPE_MESH_POINT)
#endif
}, {
.max = MT7915_MAX_INTERFACES,
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 62b5b912818f..0b6facb17ff7 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -157,10 +157,14 @@ static void mt76s_net_worker(struct mt76_worker *w)
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- bool wake, mcu = q == dev->q_mcu[MT_MCUQ_WM];
struct mt76_queue_entry entry;
int nframes = 0;
+ bool mcu;
+ if (!q)
+ return 0;
+
+ mcu = q == dev->q_mcu[MT_MCUQ_WM];
while (q->queued > 0) {
if (!q->entry[q->tail].done)
break;
@@ -177,21 +181,12 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
nframes++;
}
- wake = q->stopped && q->queued < q->ndesc - 8;
- if (wake)
- q->stopped = false;
-
if (!q->queued)
wake_up(&dev->tx_wait);
- if (mcu)
- goto out;
-
- mt76_txq_schedule(&dev->phy, q->qid);
+ if (!mcu)
+ mt76_txq_schedule(&dev->phy, q->qid);
- if (wake)
- ieee80211_wake_queue(dev->hw, q->qid);
-out:
return nframes;
}
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index dc850109de22..b95d093728b9 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -811,11 +811,12 @@ static void mt76u_status_worker(struct mt76_worker *w)
struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
struct mt76_queue_entry entry;
struct mt76_queue *q;
- bool wake;
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
q = dev->phy.q_tx[i];
+ if (!q)
+ continue;
while (q->queued > 0) {
if (!q->entry[q->tail].done)
@@ -827,10 +828,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
mt76_queue_tx_complete(dev, q, &entry);
}
- wake = q->stopped && q->queued < q->ndesc - 8;
- if (wake)
- q->stopped = false;
-
if (!q->queued)
wake_up(&dev->tx_wait);
@@ -839,8 +836,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
if (dev->drv->tx_status_data &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
queue_work(dev->wq, &dev->usb.stat_work);
- if (wake)
- ieee80211_wake_queue(dev->hw, i);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index a7259dbc953d..965bd9589045 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -78,7 +78,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
- complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
if (rtlpriv->cfg->alt_fw_name) {
err = request_firmware(&firmware,
@@ -91,13 +90,13 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
}
pr_err("Selected firmware is not available\n");
rtlpriv->max_fw_size = 0;
- return;
+ goto exit;
}
found_alt:
if (firmware->size > rtlpriv->max_fw_size) {
pr_err("Firmware is too big!\n");
release_firmware(firmware);
- return;
+ goto exit;
}
if (!is_wow) {
memcpy(rtlpriv->rtlhal.pfirmware, firmware->data,
@@ -109,6 +108,9 @@ found_alt:
rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
}
release_firmware(firmware);
+
+exit:
+ complete(&rtlpriv->firmware_loading_complete);
}
void rtl_fw_cb(const struct firmware *firmware, void *context)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ce1b61519441..8caf9b34734d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -179,7 +179,7 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
int ret;
@@ -192,7 +192,6 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
return ret;
}
-EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
@@ -331,7 +330,7 @@ static inline void nvme_end_req(struct request *req)
req->__sector = nvme_lba_to_sect(req->q->queuedata,
le64_to_cpu(nvme_req(req)->result.u64));
- nvme_trace_bio_complete(req, status);
+ nvme_trace_bio_complete(req);
blk_mq_end_request(req, status);
}
@@ -578,7 +577,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
+static struct request *nvme_alloc_request_qid(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
struct request *req;
@@ -589,7 +588,6 @@ struct request *nvme_alloc_request_qid(struct request_queue *q,
nvme_init_request(req, cmd);
return req;
}
-EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
@@ -1545,8 +1543,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
}
length = (io.nblocks + 1) << ns->lba_shift;
- meta_len = (io.nblocks + 1) * ns->ms;
- metadata = nvme_to_user_ptr(io.metadata);
+
+ if ((io.control & NVME_RW_PRINFO_PRACT) &&
+ ns->ms == sizeof(struct t10_pi_tuple)) {
+ /*
+ * Protection information is stripped/inserted by the
+ * controller.
+ */
+ if (nvme_to_user_ptr(io.metadata))
+ return -EINVAL;
+ meta_len = 0;
+ metadata = NULL;
+ } else {
+ meta_len = (io.nblocks + 1) * ns->ms;
+ metadata = nvme_to_user_ptr(io.metadata);
+ }
if (ns->features & NVME_NS_EXT_LBAS) {
length += meta_len;
@@ -2858,6 +2869,11 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL,
};
+static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts && ctrl->opts->discovery_nqn;
+}
+
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
@@ -2877,7 +2893,7 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
}
if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
- (ctrl->opts && ctrl->opts->discovery_nqn))
+ nvme_discovery_ctrl(ctrl))
continue;
dev_err(ctrl->device,
@@ -3146,7 +3162,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
goto out_free;
}
- if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
+ if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
dev_err(ctrl->device,
"keep-alive support is mandatory for fabrics\n");
ret = -EINVAL;
@@ -3186,7 +3202,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ret < 0)
return ret;
- if (!ctrl->identified) {
+ if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
ret = nvme_hwmon_init(ctrl);
if (ret < 0)
return ret;
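
The NVME_RW_PRINFO_PRACT branch above encodes an NVMe rule: when PRACT is set and the namespace metadata region is exactly the 8-byte T10 PI tuple, the controller inserts and strips the protection information itself, so a user-supplied metadata pointer is rejected and no metadata is mapped. The helper below is a distilled, hypothetical sketch of just that decision, not the driver function:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/nvme.h>		/* NVME_RW_PRINFO_PRACT */
#include <linux/t10-pi.h>	/* struct t10_pi_tuple */

static int io_meta_len(u16 control, u16 ms, u16 nblocks, u64 user_meta,
		       size_t *meta_len)
{
	if ((control & NVME_RW_PRINFO_PRACT) &&
	    ms == sizeof(struct t10_pi_tuple)) {
		/* controller strips/inserts PI; a user buffer is invalid */
		if (user_meta)
			return -EINVAL;
		*meta_len = 0;
	} else {
		*meta_len = (size_t)(nblocks + 1) * ms;
	}
	return 0;
}
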
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 38373a0e86ef..5f36cfa8136c 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -166,6 +166,7 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set admin_tag_set;
struct blk_mq_tag_set tag_set;
+ struct work_struct ioerr_work;
struct delayed_work connect_work;
struct kref ref;
@@ -1889,6 +1890,15 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
}
static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+ nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
+static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
@@ -2046,7 +2056,7 @@ done:
check_error:
if (terminate_assoc)
- nvme_fc_error_recovery(ctrl, "transport detected io error");
+ queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
static int
@@ -3233,6 +3243,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
* kill the association on the link side. this will block
@@ -3449,6 +3460,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+ INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
spin_lock_init(&ctrl->lock);
/* io queue count */
@@ -3540,6 +3552,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
fail_ctrl:
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_work_sync(&ctrl->ctrl.reset_work);
cancel_delayed_work_sync(&ctrl->connect_work);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 7e49f61f81df..88a6b97247f5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -610,8 +610,6 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd);
@@ -630,7 +628,6 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
@@ -675,8 +672,7 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
kblockd_schedule_work(&head->requeue_work);
}
-static inline void nvme_trace_bio_complete(struct request *req,
- blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
@@ -731,8 +727,7 @@ static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
-static inline void nvme_trace_bio_complete(struct request *req,
- blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b4385cb0ff60..856aa31931c1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -23,6 +23,7 @@
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>
@@ -542,50 +543,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
return true;
}
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
- dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t dma_addr = iod->first_dma;
int i;
- if (iod->dma_len) {
- dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
- rq_dma_dir(req));
- return;
+ for (i = 0; i < iod->npages; i++) {
+ __le64 *prp_list = nvme_pci_iod_list(req)[i];
+ dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+ dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+ dma_addr = next_dma_addr;
}
- WARN_ON_ONCE(!iod->nents);
+}
- if (is_pci_p2pdma_page(sg_page(iod->sg)))
- pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
- rq_dma_dir(req));
- else
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
+{
+ const int last_sg = SGES_PER_PAGE - 1;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t dma_addr = iod->first_dma;
+ int i;
+ for (i = 0; i < iod->npages; i++) {
+ struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
+ dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
- if (iod->npages == 0)
- dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
- dma_addr);
+ dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
+ dma_addr = next_dma_addr;
+ }
- for (i = 0; i < iod->npages; i++) {
- void *addr = nvme_pci_iod_list(req)[i];
+}
- if (iod->use_sgl) {
- struct nvme_sgl_desc *sg_list = addr;
+static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- next_dma_addr =
- le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
- } else {
- __le64 *prp_list = addr;
+ if (is_pci_p2pdma_page(sg_page(iod->sg)))
+ pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
+ rq_dma_dir(req));
+ else
+ dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+}
- next_dma_addr = le64_to_cpu(prp_list[last_prp]);
- }
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- dma_pool_free(dev->prp_page_pool, addr, dma_addr);
- dma_addr = next_dma_addr;
+ if (iod->dma_len) {
+ dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+ rq_dma_dir(req));
+ return;
}
+ WARN_ON_ONCE(!iod->nents);
+
+ nvme_unmap_sg(dev, req);
+ if (iod->npages == 0)
+ dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+ iod->first_dma);
+ else if (iod->use_sgl)
+ nvme_free_sgls(dev, req);
+ else
+ nvme_free_prps(dev, req);
mempool_free(iod->sg, dev->iod_mempool);
}
@@ -661,7 +683,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
__le64 *old_prp_list = prp_list;
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
- return BLK_STS_RESOURCE;
+ goto free_prps;
list[iod->npages++] = prp_list;
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -681,14 +703,14 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
dma_addr = sg_dma_address(sg);
dma_len = sg_dma_len(sg);
}
-
done:
cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
-
return BLK_STS_OK;
-
- bad_sgl:
+free_prps:
+ nvme_free_prps(dev, req);
+ return BLK_STS_RESOURCE;
+bad_sgl:
WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
"Invalid SGL for payload:%d nents:%d\n",
blk_rq_payload_bytes(req), iod->nents);
@@ -760,7 +782,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
if (!sg_list)
- return BLK_STS_RESOURCE;
+ goto free_sgls;
i = 0;
nvme_pci_iod_list(req)[iod->npages++] = sg_list;
@@ -773,6 +795,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
} while (--entries > 0);
return BLK_STS_OK;
+free_sgls:
+ nvme_free_sgls(dev, req);
+ return BLK_STS_RESOURCE;
}
static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
@@ -841,7 +866,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
if (!iod->nents)
- goto out;
+ goto out_free_sg;
if (is_pci_p2pdma_page(sg_page(iod->sg)))
nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
@@ -850,16 +875,21 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
rq_dma_dir(req), DMA_ATTR_NO_WARN);
if (!nr_mapped)
- goto out;
+ goto out_free_sg;
iod->use_sgl = nvme_pci_use_sgls(dev, req);
if (iod->use_sgl)
ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-out:
if (ret != BLK_STS_OK)
- nvme_unmap_data(dev, req);
+ goto out_unmap_sg;
+ return BLK_STS_OK;
+
+out_unmap_sg:
+ nvme_unmap_sg(dev, req);
+out_free_sg:
+ mempool_free(iod->sg, dev->iod_mempool);
return ret;
}
@@ -967,6 +997,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
struct nvme_completion *cqe = &nvmeq->cqes[idx];
+ __u16 command_id = READ_ONCE(cqe->command_id);
struct request *req;
/*
@@ -975,17 +1006,17 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
+ if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
nvme_complete_async_event(&nvmeq->dev->ctrl,
cqe->status, &cqe->result);
return;
}
- req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+ req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
if (unlikely(!req)) {
dev_warn(nvmeq->dev->ctrl.device,
"invalid id %d completed on queue %d\n",
- cqe->command_id, le16_to_cpu(cqe->sq_id));
+ command_id, le16_to_cpu(cqe->sq_id));
return;
}
@@ -1794,6 +1825,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
if (dev->cmb_size)
return;
+ if (NVME_CAP_CMBS(dev->ctrl.cap))
+ writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
+
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!dev->cmbsz)
return;
@@ -1808,6 +1842,16 @@ static void nvme_map_cmb(struct nvme_dev *dev)
return;
/*
+ * Tell the controller about the host side address mapping the CMB,
+ * and enable CMB decoding for the NVMe 1.4+ scheme:
+ */
+ if (NVME_CAP_CMBS(dev->ctrl.cap)) {
+ hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
+ (pci_bus_address(pdev, bar) + offset),
+ dev->bar + NVME_REG_CMBMSC);
+ }
+
+ /*
* Controllers may support a CMB size larger than their BAR,
* for example, due to being behind a bridge. Reduce the CMB to
* the reported size of the BAR
@@ -3196,7 +3240,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
- .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index cf6c49d09c82..b7ce4f221d99 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -97,6 +97,7 @@ struct nvme_rdma_queue {
struct completion cm_done;
bool pi_support;
int cq_size;
+ struct mutex queue_lock;
};
struct nvme_rdma_ctrl {
@@ -579,6 +580,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
int ret;
queue = &ctrl->queues[idx];
+ mutex_init(&queue->queue_lock);
queue->ctrl = ctrl;
if (idx && ctrl->ctrl.max_integrity_segments)
queue->pi_support = true;
@@ -598,7 +600,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
if (IS_ERR(queue->cm_id)) {
dev_info(ctrl->ctrl.device,
"failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
- return PTR_ERR(queue->cm_id);
+ ret = PTR_ERR(queue->cm_id);
+ goto out_destroy_mutex;
}
if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
@@ -628,6 +631,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
out_destroy_cm_id:
rdma_destroy_id(queue->cm_id);
nvme_rdma_destroy_queue_ib(queue);
+out_destroy_mutex:
+ mutex_destroy(&queue->queue_lock);
return ret;
}
@@ -639,9 +644,10 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
- if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
- return;
- __nvme_rdma_stop_queue(queue);
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
+ __nvme_rdma_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
}
static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -651,6 +657,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
+ mutex_destroy(&queue->queue_lock);
}
static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 1ba659927442..881d28eb15e9 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -76,6 +76,7 @@ struct nvme_tcp_queue {
struct work_struct io_work;
int io_cpu;
+ struct mutex queue_lock;
struct mutex send_mutex;
struct llist_head req_list;
struct list_head send_list;
@@ -201,7 +202,7 @@ static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
- return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
+ return min_t(size_t, iov_iter_single_seg_count(&req->iter),
req->pdu_len - req->pdu_sent);
}
@@ -262,6 +263,16 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
}
}
+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+ int ret;
+
+ /* drain the send queue as much as we can... */
+ do {
+ ret = nvme_tcp_try_send(queue);
+ } while (ret > 0);
+}
+
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
bool sync, bool last)
{
@@ -276,10 +287,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
* directly, otherwise queue io_work. Also, only do that if we
* are on the same cpu, so we don't introduce contention.
*/
- if (queue->io_cpu == smp_processor_id() &&
+ if (queue->io_cpu == __smp_processor_id() &&
sync && empty && mutex_trylock(&queue->send_mutex)) {
queue->more_requests = !last;
- nvme_tcp_try_send(queue);
+ nvme_tcp_send_all(queue);
queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
} else if (last) {
@@ -1209,6 +1220,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
sock_release(queue->sock);
kfree(queue->pdu);
+ mutex_destroy(&queue->queue_lock);
}
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
@@ -1370,6 +1382,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
int ret, rcv_pdu_size;
+ mutex_init(&queue->queue_lock);
queue->ctrl = ctrl;
init_llist_head(&queue->req_list);
INIT_LIST_HEAD(&queue->send_list);
@@ -1388,7 +1401,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
if (ret) {
dev_err(nctrl->device,
"failed to create socket: %d\n", ret);
- return ret;
+ goto err_destroy_mutex;
}
/* Single syn retry */
@@ -1497,6 +1510,8 @@ err_crypto:
err_sock:
sock_release(queue->sock);
queue->sock = NULL;
+err_destroy_mutex:
+ mutex_destroy(&queue->queue_lock);
return ret;
}
@@ -1524,9 +1539,10 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
- if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
- return;
- __nvme_tcp_stop_queue(queue);
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ __nvme_tcp_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
}
static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
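
Both the RDMA and TCP stop paths above gain a per-queue mutex so that testing the LIVE flag and tearing the queue down happen as one step; without it, two concurrent stop callers can interleave and one of them operates on a queue that is already half-stopped. A generic sketch of the pattern with illustrative names (Q_LIVE, do_stop), not the nvme code:

#include <linux/bitops.h>
#include <linux/mutex.h>

#define Q_LIVE	0	/* illustrative flag bit */

struct queue {
	unsigned long flags;
	struct mutex queue_lock;
};

/* Stop @q at most once per LIVE period, even with concurrent callers. */
static void queue_stop(struct queue *q, void (*do_stop)(struct queue *q))
{
	mutex_lock(&q->queue_lock);
	if (test_and_clear_bit(Q_LIVE, &q->flags))
		do_stop(q);
	mutex_unlock(&q->queue_lock);
}
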
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 8d90235e4fcc..dc1ea468b182 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -487,8 +487,10 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
/* return an all zeroed buffer if we can't find an active namespace */
ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
- if (!ns)
+ if (!ns) {
+ status = NVME_SC_INVALID_NS;
goto done;
+ }
nvmet_ns_revalidate(ns);
@@ -541,7 +543,9 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
id->nsattr |= (1 << 0);
nvmet_put_namespace(ns);
done:
- status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+ if (!status)
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
kfree(id);
out:
nvmet_req_complete(req, status);
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 733d9363900e..68213f0a052b 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1501,7 +1501,8 @@ static ssize_t
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int opcode, starting, amount;
+ unsigned int opcode;
+ int starting, amount;
if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
return -EBADRQC;
@@ -1588,8 +1589,8 @@ out_destroy_class:
static void __exit fcloop_exit(void)
{
- struct fcloop_lport *lport;
- struct fcloop_nport *nport;
+ struct fcloop_lport *lport = NULL;
+ struct fcloop_nport *nport = NULL;
struct fcloop_tport *tport;
struct fcloop_rport *rport;
unsigned long flags;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 5c1e7cb7fe0d..06b6b742bb21 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1220,6 +1220,14 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
}
ndev->inline_data_size = nport->inline_data_size;
ndev->inline_page_count = inline_page_count;
+
+ if (nport->pi_enable && !(cm_id->device->attrs.device_cap_flags &
+ IB_DEVICE_INTEGRITY_HANDOVER)) {
+ pr_warn("T10-PI is not supported by device %s. Disabling it\n",
+ cm_id->device->name);
+ nport->pi_enable = false;
+ }
+
ndev->device = cm_id->device;
kref_init(&ndev->ref);
@@ -1641,6 +1649,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
spin_lock_irqsave(&queue->state_lock, flags);
switch (queue->state) {
case NVMET_RDMA_Q_CONNECTING:
+ while (!list_empty(&queue->rsp_wait_list)) {
+ struct nvmet_rdma_rsp *rsp;
+
+ rsp = list_first_entry(&queue->rsp_wait_list,
+ struct nvmet_rdma_rsp,
+ wait_list);
+ list_del(&rsp->wait_list);
+ nvmet_rdma_put_rsp(rsp);
+ }
+ fallthrough;
case NVMET_RDMA_Q_LIVE:
queue->state = NVMET_RDMA_Q_DISCONNECTING;
disconnect = true;
@@ -1845,14 +1863,6 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
goto out_destroy_id;
}
- if (port->nport->pi_enable &&
- !(cm_id->device->attrs.device_cap_flags &
- IB_DEVICE_INTEGRITY_HANDOVER)) {
- pr_err("T10-PI is not supported for %pISpcs\n", addr);
- ret = -EINVAL;
- goto out_destroy_id;
- }
-
port->cm_id = cm_id;
return 0;
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 4268eb359915..8c905aabacc0 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -1092,7 +1092,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
if (IS_ERR(opp_table->clk)) {
ret = PTR_ERR(opp_table->clk);
if (ret == -EPROBE_DEFER)
- goto err;
+ goto remove_opp_dev;
dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
}
@@ -1101,7 +1101,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
if (ret) {
if (ret == -EPROBE_DEFER)
- goto err;
+ goto put_clk;
dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
__func__, ret);
@@ -1113,6 +1113,11 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
return opp_table;
+put_clk:
+ if (!IS_ERR(opp_table->clk))
+ clk_put(opp_table->clk);
+remove_opp_dev:
+ _remove_opp_dev(opp_dev, opp_table);
err:
kfree(opp_table);
return ERR_PTR(ret);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b9fecc25d213..09b03cfba889 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1644,7 +1644,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
res = pdev->resource + bar_idx;
- size = ilog2(resource_size(res)) - 20;
+ size = pci_rebar_bytes_to_size(resource_size(res));
ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
@@ -3603,8 +3603,16 @@ u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
return 0;
pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
- return (cap & PCI_REBAR_CAP_SIZES) >> 4;
+ cap &= PCI_REBAR_CAP_SIZES;
+
+ /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
+ if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
+ bar == 0 && cap == 0x7000)
+ cap = 0x3f000;
+
+ return cap >> 4;
}
+EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
/**
* pci_rebar_get_current_size - get the current size of a BAR
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 5c59365092fa..8f130f9531b3 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -630,7 +630,6 @@ int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
struct resource *res);
#endif
-u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size);
static inline u64 pci_rebar_size_to_bytes(int size)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 794a37d50853..cb2f55f450e4 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -726,11 +726,6 @@ static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
return per_cpu(hw_events->irq, cpu);
}
-bool arm_pmu_irq_is_nmi(void)
-{
- return has_nmi;
-}
-
/*
* PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are
diff --git a/drivers/phy/ingenic/Makefile b/drivers/phy/ingenic/Makefile
index 65d5ea00fc9d..1cb158d7233f 100644
--- a/drivers/phy/ingenic/Makefile
+++ b/drivers/phy/ingenic/Makefile
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += phy-ingenic-usb.o
+obj-$(CONFIG_PHY_INGENIC_USB) += phy-ingenic-usb.o
diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig
index d38def43b1bf..55f8e6c048ab 100644
--- a/drivers/phy/mediatek/Kconfig
+++ b/drivers/phy/mediatek/Kconfig
@@ -49,7 +49,9 @@ config PHY_MTK_HDMI
config PHY_MTK_MIPI_DSI
tristate "MediaTek MIPI-DSI Driver"
- depends on ARCH_MEDIATEK && OF
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on COMMON_CLK
+ depends on OF
select GENERIC_PHY
help
Support MIPI DSI for Mediatek SoCs.
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index 442522ba487f..4728e2bff662 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -662,35 +662,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev)
generic_phy = devm_phy_create(ddata->dev, NULL, &ops);
if (IS_ERR(generic_phy)) {
error = PTR_ERR(generic_phy);
- return PTR_ERR(generic_phy);
+ goto out_reg_disable;
}
phy_set_drvdata(generic_phy, ddata);
phy_provider = devm_of_phy_provider_register(ddata->dev,
of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
+ if (IS_ERR(phy_provider)) {
+ error = PTR_ERR(phy_provider);
+ goto out_reg_disable;
+ }
error = cpcap_usb_init_optional_pins(ddata);
if (error)
- return error;
+ goto out_reg_disable;
cpcap_usb_init_optional_gpios(ddata);
error = cpcap_usb_init_iio(ddata);
if (error)
- return error;
+ goto out_reg_disable;
error = cpcap_usb_init_interrupts(pdev, ddata);
if (error)
- return error;
+ goto out_reg_disable;
usb_add_phy_dev(&ddata->phy);
atomic_set(&ddata->active, 1);
schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
return 0;
+
+out_reg_disable:
+ regulator_disable(ddata->vusb);
+
+ return error;
}
static int cpcap_usb_phy_remove(struct platform_device *pdev)
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index 34803a6c7664..5c1a109842a7 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -347,7 +347,7 @@ FUNC_GROUP_DECL(RMII4, F24, E23, E24, E25, C25, C24, B26, B25, B24);
#define D22 40
SIG_EXPR_LIST_DECL_SESG(D22, SD1CLK, SD1, SIG_DESC_SET(SCU414, 8));
-SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU414, 8));
+SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU4B4, 8));
PIN_DECL_2(D22, GPIOF0, SD1CLK, PWM8);
GROUP_DECL(PWM8G0, D22);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 7aeb552d16ce..72f17f26acd8 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -920,6 +920,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
err = hw->soc->bias_set(hw, desc, pullup);
if (err)
return err;
+ } else if (hw->soc->bias_set_combo) {
+ err = hw->soc->bias_set_combo(hw, desc, pullup, arg);
+ if (err)
+ return err;
} else {
return -ENOTSUPP;
}
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index d4ea10803fd9..abfe11c7b49f 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -949,7 +949,6 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
} else {
int irq = chip->to_irq(chip, offset);
const int pullidx = pull ? 1 : 0;
- bool wake;
int val;
static const char * const pulls[] = {
"none ",
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 53a6a24bd052..3ea163498647 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -37,11 +37,11 @@
#define JZ4740_GPIO_TRIG 0x70
#define JZ4740_GPIO_FLAG 0x80
-#define JZ4760_GPIO_INT 0x10
-#define JZ4760_GPIO_PAT1 0x30
-#define JZ4760_GPIO_PAT0 0x40
-#define JZ4760_GPIO_FLAG 0x50
-#define JZ4760_GPIO_PEN 0x70
+#define JZ4770_GPIO_INT 0x10
+#define JZ4770_GPIO_PAT1 0x30
+#define JZ4770_GPIO_PAT0 0x40
+#define JZ4770_GPIO_FLAG 0x50
+#define JZ4770_GPIO_PEN 0x70
#define X1830_GPIO_PEL 0x110
#define X1830_GPIO_PEH 0x120
@@ -1688,8 +1688,8 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
u8 offset, int value)
{
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, !!value);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
}
@@ -1718,9 +1718,9 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
break;
}
- if (jzgc->jzpc->info->version >= ID_JZ4760) {
- reg1 = JZ4760_GPIO_PAT1;
- reg2 = JZ4760_GPIO_PAT0;
+ if (jzgc->jzpc->info->version >= ID_JZ4770) {
+ reg1 = JZ4770_GPIO_PAT1;
+ reg2 = JZ4770_GPIO_PAT0;
} else {
reg1 = JZ4740_GPIO_TRIG;
reg2 = JZ4740_GPIO_DIR;
@@ -1758,8 +1758,8 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
int irq = irqd->hwirq;
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, true);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
@@ -1774,8 +1774,8 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
ingenic_gpio_irq_mask(irqd);
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, false);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
}
@@ -1799,8 +1799,8 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH);
}
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_FLAG, irq, false);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
}
@@ -1856,8 +1856,8 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(irq_chip, desc);
- if (jzgc->jzpc->info->version >= ID_JZ4760)
- flag = ingenic_gpio_read_reg(jzgc, JZ4760_GPIO_FLAG);
+ if (jzgc->jzpc->info->version >= ID_JZ4770)
+ flag = ingenic_gpio_read_reg(jzgc, JZ4770_GPIO_FLAG);
else
flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
@@ -1938,9 +1938,9 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
struct ingenic_pinctrl *jzpc = jzgc->jzpc;
unsigned int pin = gc->base + offset;
- if (jzpc->info->version >= ID_JZ4760) {
- if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) ||
- ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1))
+ if (jzpc->info->version >= ID_JZ4770) {
+ if (ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_INT) ||
+ ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PAT1))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
}
@@ -1991,20 +1991,20 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
'A' + offt, idx, func);
if (jzpc->info->version >= ID_X1000) {
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, false);
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->info->version >= ID_JZ4760) {
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2);
- ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func > 0);
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func & 0x1);
}
return 0;
@@ -2057,14 +2057,14 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
'A' + offt, idx, input ? "in" : "out");
if (jzpc->info->version >= ID_X1000) {
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true);
- ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+ ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->info->version >= ID_JZ4760) {
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
@@ -2091,8 +2091,8 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
bool pull;
- if (jzpc->info->version >= ID_JZ4760)
- pull = !ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PEN);
+ if (jzpc->info->version >= ID_JZ4770)
+ pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
else
pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
@@ -2141,8 +2141,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
REG_SET(X1830_GPIO_PEH), bias << idxh);
}
- } else if (jzpc->info->version >= ID_JZ4760) {
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !bias);
+ } else if (jzpc->info->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PEN, !bias);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
}
@@ -2151,8 +2151,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
unsigned int pin, bool high)
{
- if (jzpc->info->version >= ID_JZ4760)
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
+ if (jzpc->info->version >= ID_JZ4770)
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, high);
else
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e051aecf95c4..d70caecd21d2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -51,6 +51,7 @@
* @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
* detection.
* @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller
+ * @disabled_for_mux: These IRQs were disabled because we muxed away.
* @soc: Reference to soc_data of platform specific data.
* @regs: Base addresses for the TLMM tiles.
* @phys_base: Physical base address
@@ -72,6 +73,7 @@ struct msm_pinctrl {
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO);
+ DECLARE_BITMAP(disabled_for_mux, MAX_NR_GPIO);
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs[MAX_NR_TILES];
@@ -96,6 +98,14 @@ MSM_ACCESSOR(intr_cfg)
MSM_ACCESSOR(intr_status)
MSM_ACCESSOR(intr_target)
+static void msm_ack_intr_status(struct msm_pinctrl *pctrl,
+ const struct msm_pingroup *g)
+{
+ u32 val = g->intr_ack_high ? BIT(g->intr_status_bit) : 0;
+
+ msm_writel_intr_status(val, pctrl, g);
+}
+
static int msm_get_groups_count(struct pinctrl_dev *pctldev)
{
struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -171,6 +181,10 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned group)
{
struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_chip *gc = &pctrl->chip;
+ unsigned int irq = irq_find_mapping(gc->irq.domain, group);
+ struct irq_data *d = irq_get_irq_data(irq);
+ unsigned int gpio_func = pctrl->soc->gpio_func;
const struct msm_pingroup *g;
unsigned long flags;
u32 val, mask;
@@ -187,6 +201,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
if (WARN_ON(i == g->nfuncs))
return -EINVAL;
+ /*
+	 * If a GPIO interrupt is set up on this pin then we need special
+	 * handling. Specifically, the interrupt detection logic will still see
+	 * the pin twiddle even when we're muxed away.
+	 *
+	 * When we see a pin with an interrupt set up on it, we'll disable
+	 * (mask) interrupts on it when we mux away, until we mux back. Note
+ * that disable_irq() refcounts and interrupts are disabled as long as
+ * at least one disable_irq() has been called.
+ */
+ if (d && i != gpio_func &&
+ !test_and_set_bit(d->hwirq, pctrl->disabled_for_mux))
+ disable_irq(irq);
+
raw_spin_lock_irqsave(&pctrl->lock, flags);
val = msm_readl_ctl(pctrl, g);
@@ -196,6 +224,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ if (d && i == gpio_func &&
+ test_and_clear_bit(d->hwirq, pctrl->disabled_for_mux)) {
+ /*
+ * Clear interrupts detected while not GPIO since we only
+ * masked things.
+ */
+ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, false);
+ else
+ msm_ack_intr_status(pctrl, g);
+
+ enable_irq(irq);
+ }
+
return 0;
}
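For illustration, the mechanism described in the comment above can be distilled into a small sketch. The names example_priv, example_mux_away() and example_mux_back() are hypothetical stand-ins for the driver's own bookkeeping; the point is that disable_irq()/enable_irq() nest, so a bitmap guards against unbalanced calls, and any interrupt latched while muxed away is acked before re-enabling.

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>

struct example_priv {
	DECLARE_BITMAP(disabled_for_mux, 256);	/* one bit per hw irq, hypothetical size */
};

/* Called when the pin is muxed away from its GPIO function. */
static void example_mux_away(struct example_priv *p, unsigned int irq, unsigned long hwirq)
{
	/* Disable at most once; disable_irq() refcounts, so keep it balanced. */
	if (!test_and_set_bit(hwirq, p->disabled_for_mux))
		disable_irq(irq);
}

/* Called when the pin is muxed back to its GPIO function. */
static void example_mux_back(struct example_priv *p, unsigned int irq, unsigned long hwirq)
{
	if (test_and_clear_bit(hwirq, p->disabled_for_mux)) {
		/* ack any status latched while muxed away (hardware specific) */
		enable_irq(irq);
	}
}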
@@ -210,8 +252,7 @@ static int msm_pinmux_request_gpio(struct pinctrl_dev *pctldev,
if (!g->nfuncs)
return 0;
- /* For now assume function 0 is GPIO because it always is */
- return msm_pinmux_set_mux(pctldev, g->funcs[0], offset);
+ return msm_pinmux_set_mux(pctldev, g->funcs[pctrl->soc->gpio_func], offset);
}
static const struct pinmux_ops msm_pinmux_ops = {
@@ -774,7 +815,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
-static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
+static void msm_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
@@ -792,17 +833,6 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
raw_spin_lock_irqsave(&pctrl->lock, flags);
- if (status_clear) {
- /*
- * clear the interrupt status bit before unmask to avoid
- * any erroneous interrupts that would have got latched
- * when the interrupt is not in use.
- */
- val = msm_readl_intr_status(pctrl, g);
- val &= ~BIT(g->intr_status_bit);
- msm_writel_intr_status(val, pctrl, g);
- }
-
val = msm_readl_intr_cfg(pctrl, g);
val |= BIT(g->intr_raw_status_bit);
val |= BIT(g->intr_enable_bit);
@@ -822,7 +852,7 @@ static void msm_gpio_irq_enable(struct irq_data *d)
irq_chip_enable_parent(d);
if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
- msm_gpio_irq_clear_unmask(d, true);
+ msm_gpio_irq_unmask(d);
}
static void msm_gpio_irq_disable(struct irq_data *d)
@@ -837,11 +867,6 @@ static void msm_gpio_irq_disable(struct irq_data *d)
msm_gpio_irq_mask(d);
}
-static void msm_gpio_irq_unmask(struct irq_data *d)
-{
- msm_gpio_irq_clear_unmask(d, false);
-}
-
/**
* msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent.
* @d: The irq data.
@@ -894,7 +919,6 @@ static void msm_gpio_irq_ack(struct irq_data *d)
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
unsigned long flags;
- u32 val;
if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
@@ -906,12 +930,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = msm_readl_intr_status(pctrl, g);
- if (g->intr_ack_high)
- val |= BIT(g->intr_status_bit);
- else
- val &= ~BIT(g->intr_status_bit);
- msm_writel_intr_status(val, pctrl, g);
+ msm_ack_intr_status(pctrl, g);
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
@@ -936,6 +955,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
unsigned long flags;
+ bool was_enabled;
u32 val;
if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
@@ -997,6 +1017,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
* could cause the INTR_STATUS to be set for EDGE interrupts.
*/
val = msm_readl_intr_cfg(pctrl, g);
+ was_enabled = val & BIT(g->intr_raw_status_bit);
val |= BIT(g->intr_raw_status_bit);
if (g->intr_detection_width == 2) {
val &= ~(3 << g->intr_detection_bit);
@@ -1046,6 +1067,14 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
}
msm_writel_intr_cfg(val, pctrl, g);
+ /*
+ * The first time we set RAW_STATUS_EN it could trigger an interrupt.
+ * Clear the interrupt. This is safe because we have
+ * IRQCHIP_SET_TYPE_MASKED.
+ */
+ if (!was_enabled)
+ msm_ack_intr_status(pctrl, g);
+
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
@@ -1099,16 +1128,11 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
}
/*
- * Clear the interrupt that may be pending before we enable
- * the line.
- * This is especially a problem with the GPIOs routed to the
- * PDC. These GPIOs are direct-connect interrupts to the GIC.
- * Disabling the interrupt line at the PDC does not prevent
- * the interrupt from being latched at the GIC. The state at
- * GIC needs to be cleared before enabling.
+ * The disable / clear-enable workaround we do in msm_pinmux_set_mux()
+ * only works if disable is not lazy since we only clear any bogus
+ * interrupt in hardware. Explicitly mark the interrupt as UNLAZY.
*/
- if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
- irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+ irq_set_status_flags(d->irq, IRQ_DISABLE_UNLAZY);
return 0;
out:
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 333f99243c43..e31a5167c91e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -118,6 +118,7 @@ struct msm_gpio_wakeirq_map {
* @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need
* to be aware that their parent can't handle dual
* edge interrupts.
+ * @gpio_func: Which function number is GPIO (usually 0).
*/
struct msm_pinctrl_soc_data {
const struct pinctrl_pin_desc *pins;
@@ -134,6 +135,7 @@ struct msm_pinctrl_soc_data {
const struct msm_gpio_wakeirq_map *wakeirq_map;
unsigned int nwakeirq_map;
bool wakeirq_dual_edge_errata;
+ unsigned int gpio_func;
};
extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
index 33040b0b3b79..2c941cdac9ee 100644
--- a/drivers/platform/surface/Kconfig
+++ b/drivers/platform/surface/Kconfig
@@ -5,6 +5,7 @@
menuconfig SURFACE_PLATFORMS
bool "Microsoft Surface Platform-Specific Device Drivers"
+ depends on ACPI
default y
help
Say Y here to get to see options for platform-specific device drivers
@@ -29,20 +30,19 @@ config SURFACE3_WMI
config SURFACE_3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
- depends on ACPI && KEYBOARD_GPIO && I2C
+ depends on KEYBOARD_GPIO && I2C
help
This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
config SURFACE_3_POWER_OPREGION
tristate "Surface 3 battery platform operation region support"
- depends on ACPI && I2C
+ depends on I2C
help
This driver provides support for ACPI operation
region of the Surface 3 battery platform driver.
config SURFACE_GPE
tristate "Surface GPE/Lid Support Driver"
- depends on ACPI
depends on DMI
help
This driver marks the GPEs related to the ACPI lid device found on
@@ -52,7 +52,7 @@ config SURFACE_GPE
config SURFACE_PRO3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
- depends on ACPI && INPUT
+ depends on INPUT
help
This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c
index e49e5d6d5d4e..86f6991b1215 100644
--- a/drivers/platform/surface/surface_gpe.c
+++ b/drivers/platform/surface/surface_gpe.c
@@ -181,12 +181,12 @@ static int surface_lid_enable_wakeup(struct device *dev, bool enable)
return 0;
}
-static int surface_gpe_suspend(struct device *dev)
+static int __maybe_unused surface_gpe_suspend(struct device *dev)
{
return surface_lid_enable_wakeup(dev, true);
}
-static int surface_gpe_resume(struct device *dev)
+static int __maybe_unused surface_gpe_resume(struct device *dev)
{
return surface_lid_enable_wakeup(dev, false);
}
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index 0102bf1c7916..ef8342572463 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -85,7 +85,7 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
iowrite32(val, dev->regbase + reg_offset);
}
-#if CONFIG_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
static int smu_fw_info_show(struct seq_file *s, void *unused)
{
struct amd_pmc_dev *dev = s->private;
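For context on the one-character guard fix above: when debugfs is disabled, CONFIG_DEBUG_FS is simply not defined, so "#if CONFIG_DEBUG_FS" only works because the preprocessor evaluates an undefined identifier as 0 (and the kernel's -Wundef flag will typically warn about it); "#ifdef" is the conventional guard. A minimal, illustrative comparison:

/* Illustration only. */
#ifdef CONFIG_DEBUG_FS
/* compiled in only when debugfs support is enabled (CONFIG_DEBUG_FS defined as 1) */
#endif

#if IS_ENABLED(CONFIG_DEBUG_FS)
/* equivalent kernel idiom, which also copes with =m for tristate options */
#endif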
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index ecd477964d11..18bf8aeb5f87 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -247,7 +247,8 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
ret = bios_return->return_code;
if (ret) {
- if (ret != HPWMI_RET_UNKNOWN_CMDTYPE)
+ if (ret != HPWMI_RET_UNKNOWN_COMMAND &&
+ ret != HPWMI_RET_UNKNOWN_CMDTYPE)
pr_warn("query 0x%x returned error 0x%x\n", query, ret);
goto out_free;
}
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
index b457b0babde3..2cce82579d09 100644
--- a/drivers/platform/x86/i2c-multi-instantiate.c
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -164,13 +164,29 @@ static const struct i2c_inst_data bsg2150_data[] = {
{}
};
-static const struct i2c_inst_data int3515_data[] = {
- { "tps6598x", IRQ_RESOURCE_APIC, 0 },
- { "tps6598x", IRQ_RESOURCE_APIC, 1 },
- { "tps6598x", IRQ_RESOURCE_APIC, 2 },
- { "tps6598x", IRQ_RESOURCE_APIC, 3 },
- {}
-};
+/*
+ * Device with _HID INT3515 (TI PD controllers) has some unresolved interrupt
+ * issues. The most common problem seen is interrupt flood.
+ *
+ * There are at least two known causes. Firstly, on some boards, the
+ * I2CSerialBus resource index does not match the Interrupt resource, i.e. they
+ * are not one-to-one mapped like in the array below. Secondly, on some boards
+ * the IRQ line from the PD controller is not actually connected at all. But the
+ * interrupt flood is also seen on some boards where those are not a problem, so
+ * there are some other problems as well.
+ *
+ * Because of the issues with the interrupt, the device is disabled for now. If
+ * you wish to debug the issues, uncomment the below, and add an entry for the
+ * INT3515 device to the i2c_multi_instance_ids table.
+ *
+ * static const struct i2c_inst_data int3515_data[] = {
+ * { "tps6598x", IRQ_RESOURCE_APIC, 0 },
+ * { "tps6598x", IRQ_RESOURCE_APIC, 1 },
+ * { "tps6598x", IRQ_RESOURCE_APIC, 2 },
+ * { "tps6598x", IRQ_RESOURCE_APIC, 3 },
+ * { }
+ * };
+ */
/*
* Note new device-ids must also be added to i2c_multi_instantiate_ids in
@@ -179,7 +195,6 @@ static const struct i2c_inst_data int3515_data[] = {
static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
{ "BSG1160", (unsigned long)bsg1160_data },
{ "BSG2150", (unsigned long)bsg2150_data },
- { "INT3515", (unsigned long)int3515_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 7598cd46cf60..5b81bafa5c16 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -92,6 +92,7 @@ struct ideapad_private {
struct dentry *debug;
unsigned long cfg;
bool has_hw_rfkill_switch;
+ bool has_touchpad_switch;
const char *fnesc_guid;
};
@@ -535,7 +536,9 @@ static umode_t ideapad_is_visible(struct kobject *kobj,
} else if (attr == &dev_attr_fn_lock.attr) {
supported = acpi_has_method(priv->adev->handle, "HALS") &&
acpi_has_method(priv->adev->handle, "SALS");
- } else
+ } else if (attr == &dev_attr_touchpad.attr)
+ supported = priv->has_touchpad_switch;
+ else
supported = true;
return supported ? attr->mode : 0;
@@ -867,6 +870,9 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
{
unsigned long value;
+ if (!priv->has_touchpad_switch)
+ return;
+
/* Without reading from EC touchpad LED doesn't switch state */
if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
/* Some IdeaPads don't really turn off touchpad - they only
@@ -989,6 +995,9 @@ static int ideapad_acpi_add(struct platform_device *pdev)
priv->platform_device = pdev;
priv->has_hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
+ /* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */
+ priv->has_touchpad_switch = !acpi_dev_present("ELAN0634", NULL, -1);
+
ret = ideapad_sysfs_init(priv);
if (ret)
return ret;
@@ -1006,6 +1015,10 @@ static int ideapad_acpi_add(struct platform_device *pdev)
if (!priv->has_hw_rfkill_switch)
write_ec_cmd(priv->adev->handle, VPCCMD_W_RF, 1);
+ /* The same for Touchpad */
+ if (!priv->has_touchpad_switch)
+ write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, 1);
+
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
ideapad_register_rfkill(priv, i);
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 3b49a1f4061b..30a9062d2b4b 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -207,19 +207,19 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
},
},
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
},
},
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"),
},
},
{} /* Array terminator */
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e03df2881dc6..f3e8eca8d86d 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -8783,6 +8783,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '1', 'T', TPACPI_FAN_2CTL), /* P71 */
TPACPI_Q_LNV3('N', '1', 'U', TPACPI_FAN_2CTL), /* P51 */
TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL), /* P52 / P72 */
+ TPACPI_Q_LNV3('N', '2', 'N', TPACPI_FAN_2CTL), /* P53 / P73 */
TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (1st gen) */
TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (3rd gen) */
@@ -9951,9 +9952,9 @@ static int tpacpi_proxsensor_init(struct ibm_init_struct *iibm)
if ((palm_err == -ENODEV) && (lap_err == -ENODEV))
return 0;
/* Otherwise, if there was an error, return it */
- if (palm_err && (palm_err != ENODEV))
+ if (palm_err && (palm_err != -ENODEV))
return palm_err;
- if (lap_err && (lap_err != ENODEV))
+ if (lap_err && (lap_err != -ENODEV))
return lap_err;
if (has_palmsensor) {
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 5783139d0a11..c4de932302d6 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -263,6 +263,16 @@ static const struct ts_dmi_data digma_citi_e200_data = {
.properties = digma_citi_e200_props,
};
+static const struct property_entry estar_beauty_hd_props[] = {
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ { }
+};
+
+static const struct ts_dmi_data estar_beauty_hd_data = {
+ .acpi_name = "GDIX1001:00",
+ .properties = estar_beauty_hd_props,
+};
+
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@@ -943,6 +953,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Estar Beauty HD (MID 7316R) */
+ .driver_data = (void *)&estar_beauty_hd_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
+ },
+ },
+ {
/* GP-electronic T701 */
.driver_data = (void *)&gp_electronic_t701_data,
.matches = {
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 476d7c7fe70a..f2edef0df40f 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -64,6 +64,7 @@ config DP83640_PHY
depends on NETWORK_PHY_TIMESTAMPING
depends on PHYLIB
depends on PTP_1588_CLOCK
+ select CRC32
help
Supports the DP83640 PHYTER with IEEE 1588 features.
@@ -78,6 +79,7 @@ config DP83640_PHY
config PTP_1588_CLOCK_INES
tristate "ZHAW InES PTP time stamping IP core"
depends on NETWORK_PHY_TIMESTAMPING
+ depends on HAS_IOMEM
depends on PHYLIB
depends on PTP_1588_CLOCK
help
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 53fa84f4d1e1..5abdd29fb9f3 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -881,6 +881,7 @@ config REGULATOR_QCOM_RPM
config REGULATOR_QCOM_RPMH
tristate "Qualcomm Technologies, Inc. RPMh regulator driver"
depends on QCOM_RPMH || (QCOM_RPMH=n && COMPILE_TEST)
+ depends on QCOM_COMMAND_DB || (QCOM_COMMAND_DB=n && COMPILE_TEST)
help
This driver supports control of PMIC regulators via the RPMh hardware
block found on Qualcomm Technologies Inc. SoCs. RPMh regulator
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index e6d5d98c3cea..9309765d0450 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -15,6 +15,36 @@
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
+/* Typical regulator startup times as per data sheet, in microseconds */
+#define BD71847_BUCK1_STARTUP_TIME 144
+#define BD71847_BUCK2_STARTUP_TIME 162
+#define BD71847_BUCK3_STARTUP_TIME 162
+#define BD71847_BUCK4_STARTUP_TIME 240
+#define BD71847_BUCK5_STARTUP_TIME 270
+#define BD71847_BUCK6_STARTUP_TIME 200
+#define BD71847_LDO1_STARTUP_TIME 440
+#define BD71847_LDO2_STARTUP_TIME 370
+#define BD71847_LDO3_STARTUP_TIME 310
+#define BD71847_LDO4_STARTUP_TIME 400
+#define BD71847_LDO5_STARTUP_TIME 530
+#define BD71847_LDO6_STARTUP_TIME 400
+
+#define BD71837_BUCK1_STARTUP_TIME 160
+#define BD71837_BUCK2_STARTUP_TIME 180
+#define BD71837_BUCK3_STARTUP_TIME 180
+#define BD71837_BUCK4_STARTUP_TIME 180
+#define BD71837_BUCK5_STARTUP_TIME 160
+#define BD71837_BUCK6_STARTUP_TIME 240
+#define BD71837_BUCK7_STARTUP_TIME 220
+#define BD71837_BUCK8_STARTUP_TIME 200
+#define BD71837_LDO1_STARTUP_TIME 440
+#define BD71837_LDO2_STARTUP_TIME 370
+#define BD71837_LDO3_STARTUP_TIME 310
+#define BD71837_LDO4_STARTUP_TIME 400
+#define BD71837_LDO5_STARTUP_TIME 310
+#define BD71837_LDO6_STARTUP_TIME 400
+#define BD71837_LDO7_STARTUP_TIME 530
+
/*
* BD718(37/47/50) have two "enable control modes". ON/OFF can either be
* controlled by software - or by PMIC internal HW state machine. Whether
@@ -613,6 +643,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -646,6 +677,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -680,6 +712,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_buck3_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -706,6 +739,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_range_mask = BD71847_BUCK4_RANGE_MASK,
.linear_range_selectors = bd71847_buck4_volt_range_sel,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -727,6 +761,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -750,6 +785,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -775,6 +811,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -796,6 +833,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -818,6 +856,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -840,6 +879,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -865,6 +905,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_ldo5_volt_range_sel,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -889,6 +930,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -942,6 +984,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -975,6 +1018,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1005,6 +1049,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK3_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1033,6 +1078,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK4_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1065,6 +1111,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd71837_buck5_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1088,6 +1135,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_BUCK6_MASK,
.enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1109,6 +1157,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1132,6 +1181,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK8_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1157,6 +1207,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1178,6 +1229,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1200,6 +1252,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1222,6 +1275,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1246,6 +1300,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO5_MASK,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1272,6 +1327,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1296,6 +1352,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO7_MASK,
.enable_reg = BD71837_REG_LDO7_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
diff --git a/drivers/regulator/pf8x00-regulator.c b/drivers/regulator/pf8x00-regulator.c
index 308c27fa6ea8..af9918cd27aa 100644
--- a/drivers/regulator/pf8x00-regulator.c
+++ b/drivers/regulator/pf8x00-regulator.c
@@ -469,13 +469,17 @@ static int pf8x00_i2c_probe(struct i2c_client *client)
}
static const struct of_device_id pf8x00_dt_ids[] = {
- { .compatible = "nxp,pf8x00",},
+ { .compatible = "nxp,pf8100",},
+ { .compatible = "nxp,pf8121a",},
+ { .compatible = "nxp,pf8200",},
{ }
};
MODULE_DEVICE_TABLE(of, pf8x00_dt_ids);
static const struct i2c_device_id pf8x00_i2c_id[] = {
- { "pf8x00", 0 },
+ { "pf8100", 0 },
+ { "pf8121a", 0 },
+ { "pf8200", 0 },
{},
};
MODULE_DEVICE_TABLE(i2c, pf8x00_i2c_id);
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index fe030ec4b7db..c395a8dda6f7 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -726,7 +726,7 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600),
+ .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
.n_voltages = 5,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
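For reference, with REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) the corrected 16000 uV step lets the five selectors of pmic5_hfsmps515 span 2800000 uV up to 2800000 + 4 * 16000 = 2864000 uV; the previous 1600 uV step would have capped the range at 2806400 uV.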
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6f5ddc3eab8c..28f637042d44 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -1079,7 +1079,8 @@ struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask);
int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_set_offline(struct qeth_card *card, bool resetting);
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+ bool resetting);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f4b60294a969..cf18d87da41e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5507,12 +5507,12 @@ out:
return rc;
}
-static int qeth_set_online(struct qeth_card *card)
+static int qeth_set_online(struct qeth_card *card,
+ const struct qeth_discipline *disc)
{
bool carrier_ok;
int rc;
- mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 2, "setonlin");
@@ -5529,7 +5529,7 @@ static int qeth_set_online(struct qeth_card *card)
/* no need for locking / error handling at this early stage: */
qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
- rc = card->discipline->set_online(card, carrier_ok);
+ rc = disc->set_online(card, carrier_ok);
if (rc)
goto err_online;
@@ -5537,7 +5537,6 @@ static int qeth_set_online(struct qeth_card *card)
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
err_online:
@@ -5552,15 +5551,14 @@ err_hardsetup:
qdio_free(CARD_DDEV(card));
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return rc;
}
-int qeth_set_offline(struct qeth_card *card, bool resetting)
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+ bool resetting)
{
int rc, rc2, rc3;
- mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 3, "setoffl");
@@ -5581,7 +5579,7 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
cancel_work_sync(&card->rx_mode_work);
- card->discipline->set_offline(card);
+ disc->set_offline(card);
qeth_qdio_clear_card(card, 0);
qeth_drain_output_queues(card);
@@ -5602,16 +5600,19 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);
static int qeth_do_reset(void *data)
{
+ const struct qeth_discipline *disc;
struct qeth_card *card = data;
int rc;
+ /* Lock-free, other users will block until we are done. */
+ disc = card->discipline;
+
QETH_CARD_TEXT(card, 2, "recover1");
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
@@ -5619,8 +5620,8 @@ static int qeth_do_reset(void *data)
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_offline(card, true);
- rc = qeth_set_online(card);
+ qeth_set_offline(card, disc, true);
+ rc = qeth_set_online(card, disc);
if (!rc) {
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
@@ -6584,6 +6585,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
break;
default:
card->info.layer_enforced = true;
+ /* It's so early that we don't need the discipline_mutex yet. */
rc = qeth_core_load_discipline(card, enforced_disc);
if (rc)
goto err_load;
@@ -6616,10 +6618,12 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
QETH_CARD_TEXT(card, 2, "removedv");
+ mutex_lock(&card->discipline_mutex);
if (card->discipline) {
card->discipline->remove(gdev);
qeth_core_free_discipline(card);
}
+ mutex_unlock(&card->discipline_mutex);
qeth_free_qdio_queues(card);
@@ -6634,6 +6638,7 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
int rc = 0;
enum qeth_discipline_id def_discipline;
+ mutex_lock(&card->discipline_mutex);
if (!card->discipline) {
def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
QETH_DISCIPLINE_LAYER2;
@@ -6647,16 +6652,23 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
}
}
- rc = qeth_set_online(card);
+ rc = qeth_set_online(card, card->discipline);
+
err:
+ mutex_unlock(&card->discipline_mutex);
return rc;
}
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
- return qeth_set_offline(card, false);
+ mutex_lock(&card->discipline_mutex);
+ rc = qeth_set_offline(card, card->discipline, false);
+ mutex_unlock(&card->discipline_mutex);
+
+ return rc;
}
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 4ed0fb0705a5..4254caf1d9b6 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -2208,7 +2208,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_ONLINE)
- qeth_set_offline(card, false);
+ qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d138ac432d01..4c2cae7ae9a7 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1813,7 +1813,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- if (qeth_get_ip_version(skb) != 4)
+ if (vlan_get_protocol(skb) != htons(ETH_P_IP))
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return qeth_features_check(skb, dev, features);
}
@@ -1971,7 +1971,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE)
- qeth_set_offline(card, false);
+ qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
index b206e266b4e7..8b0deece9758 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -4,6 +4,7 @@ config SCSI_CXGB4_ISCSI
depends on PCI && INET && (IPV6 || IPV6=n)
depends on THERMAL || !THERMAL
depends on ETHERNET
+ depends on TLS || TLS=n
select NET_VENDOR_CHELSIO
select CHELSIO_T4
select CHELSIO_LIB
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index a2beee6e09f0..5988c300cc82 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -444,7 +444,8 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
pr_err("error in devcmd2 init");
- return -ENODEV;
+ err = -ENODEV;
+ goto err_free_wq;
}
/*
@@ -460,7 +461,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
if (err)
- goto err_free_wq;
+ goto err_disable_wq;
vdev->devcmd2->result =
(struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
@@ -481,8 +482,9 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
err_free_desc_ring:
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
-err_free_wq:
+err_disable_wq:
vnic_wq_disable(&vdev->devcmd2->wq);
+err_free_wq:
vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
kfree(vdev->devcmd2);
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 2b28dd405600..e821dd32dd28 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -14,6 +14,7 @@
#include <linux/debugfs.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
+#include <linux/irq.h>
#include <linux/lcm.h>
#include <linux/libata.h>
#include <linux/mfd/syscon.h>
@@ -294,6 +295,7 @@ enum {
struct hisi_sas_hw {
int (*hw_init)(struct hisi_hba *hisi_hba);
+ int (*interrupt_preinit)(struct hisi_hba *hisi_hba);
void (*setup_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *device);
int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
@@ -393,6 +395,8 @@ struct hisi_hba {
u32 refclk_frequency_mhz;
u8 sas_addr[SAS_ADDR_SIZE];
+ int *irq_map; /* v2 hw */
+
int n_phy;
spinlock_t lock;
struct semaphore sem;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index b6d4419c32f2..cf0bfac920a8 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -2614,6 +2614,13 @@ err_out:
return NULL;
}
+static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+ if (hisi_hba->hw->interrupt_preinit)
+ return hisi_hba->hw->interrupt_preinit(hisi_hba);
+ return 0;
+}
+
int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *hw)
{
@@ -2671,6 +2678,10 @@ int hisi_sas_probe(struct platform_device *pdev,
sha->sas_port[i] = &hisi_hba->port[i].sas_port;
}
+ rc = hisi_sas_interrupt_preinit(hisi_hba);
+ if (rc)
+ goto err_out_ha;
+
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index b57177b52fac..9adfdefef9ca 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -3302,6 +3302,28 @@ static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
fatal_axi_int_v2_hw
};
+#define CQ0_IRQ_INDEX (96)
+
+static int hisi_sas_v2_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+ struct platform_device *pdev = hisi_hba->platform_dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct irq_affinity desc = {
+ .pre_vectors = CQ0_IRQ_INDEX,
+ .post_vectors = 16,
+ };
+ int resv = desc.pre_vectors + desc.post_vectors, minvec = resv + 1, nvec;
+
+ nvec = devm_platform_get_irqs_affinity(pdev, &desc, minvec, 128,
+ &hisi_hba->irq_map);
+ if (nvec < 0)
+ return nvec;
+
+ shost->nr_hw_queues = hisi_hba->cq_nvecs = nvec - resv;
+
+ return 0;
+}
+
/*
* There is a limitation in the hip06 chipset that we need
* to map in all mbigen interrupts, even if they are not used.
@@ -3310,14 +3332,11 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
{
struct platform_device *pdev = hisi_hba->platform_dev;
struct device *dev = &pdev->dev;
- int irq, rc = 0, irq_map[128];
+ int irq, rc = 0;
int i, phy_no, fatal_no, queue_no;
- for (i = 0; i < 128; i++)
- irq_map[i] = platform_get_irq(pdev, i);
-
for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
- irq = irq_map[i + 1]; /* Phy up/down is irq1 */
+ irq = hisi_hba->irq_map[i + 1]; /* Phy up/down is irq1 */
rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
DRV_NAME " phy", hisi_hba);
if (rc) {
@@ -3331,7 +3350,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- irq = irq_map[phy_no + 72];
+ irq = hisi_hba->irq_map[phy_no + 72];
rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
DRV_NAME " sata", phy);
if (rc) {
@@ -3343,7 +3362,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
for (fatal_no = 0; fatal_no < HISI_SAS_FATAL_INT_NR; fatal_no++) {
- irq = irq_map[fatal_no + 81];
+ irq = hisi_hba->irq_map[fatal_no + 81];
rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0,
DRV_NAME " fatal", hisi_hba);
if (rc) {
@@ -3354,24 +3373,22 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
}
- for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) {
+ for (queue_no = 0; queue_no < hisi_hba->cq_nvecs; queue_no++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no];
- cq->irq_no = irq_map[queue_no + 96];
+ cq->irq_no = hisi_hba->irq_map[queue_no + 96];
rc = devm_request_threaded_irq(dev, cq->irq_no,
cq_interrupt_v2_hw,
cq_thread_v2_hw, IRQF_ONESHOT,
DRV_NAME " cq", cq);
if (rc) {
dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
- irq, rc);
+ cq->irq_no, rc);
rc = -ENOENT;
goto err_out;
}
+ cq->irq_mask = irq_get_affinity_mask(cq->irq_no);
}
-
- hisi_hba->cq_nvecs = hisi_hba->queue_count;
-
err_out:
return rc;
}
@@ -3529,6 +3546,26 @@ static struct device_attribute *host_attrs_v2_hw[] = {
NULL
};
+static int map_queues_v2_hw(struct Scsi_Host *shost)
+{
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
+ for (queue = 0; queue < qmap->nr_queues; queue++) {
+ mask = irq_get_affinity_mask(hisi_hba->irq_map[96 + queue]);
+ if (!mask)
+ continue;
+
+ for_each_cpu(cpu, mask)
+ qmap->mq_map[cpu] = qmap->queue_offset + queue;
+ }
+
+ return 0;
+
+}
+
static struct scsi_host_template sht_v2_hw = {
.name = DRV_NAME,
.proc_name = DRV_NAME,
@@ -3553,10 +3590,13 @@ static struct scsi_host_template sht_v2_hw = {
#endif
.shost_attrs = host_attrs_v2_hw,
.host_reset = hisi_sas_host_reset,
+ .map_queues = map_queues_v2_hw,
+ .host_tagset = 1,
};
static const struct hisi_sas_hw hisi_sas_v2_hw = {
.hw_init = hisi_sas_v2_init,
+ .interrupt_preinit = hisi_sas_v2_interrupt_preinit,
.setup_itct = setup_itct_v2_hw,
.slot_index_alloc = slot_index_alloc_quirk_v2_hw,
.alloc_dev = alloc_dev_quirk_v2_hw,
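The map_queues_v2_hw() hook added above is called when the blk-mq tag set for the host is set up; it copies each managed CQ interrupt's affinity mask into qmap->mq_map, so a CPU submits I/O on the hardware queue whose completion interrupt is affine to it, which is what the new host_tagset/nr_hw_queues setup relies on.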
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 42e4d35e0d35..65f168c41d23 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1744,7 +1744,7 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
}
- vfc_cmd->correlation = cpu_to_be64(evt);
+ vfc_cmd->correlation = cpu_to_be64((u64)evt);
if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
return ibmvfc_send_event(evt, vhost, 0);
@@ -2418,7 +2418,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
evt->sync_iu = &rsp_iu;
- tmf->correlation = cpu_to_be64(evt);
+ tmf->correlation = cpu_to_be64((u64)evt);
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
@@ -3007,8 +3007,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
unsigned long flags = 0;
spin_lock_irqsave(shost->host_lock, flags);
- if (sdev->type == TYPE_DISK)
+ if (sdev->type == TYPE_DISK) {
sdev->allow_restart = 1;
+ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
+ }
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index d71afae6191c..841000445b9a 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1623,8 +1623,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
rc = fc_exch_done_locked(ep);
WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock);
- if (!rc)
+ if (!rc) {
fc_exch_delete(ep);
+ } else {
+		FC_EXCH_DBG(ep, "ep is completed already, "
+				"hence skip calling the resp\n");
+ goto skip_resp;
+ }
}
/*
@@ -1643,6 +1648,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
if (!fc_invoke_resp(ep, sp, fp))
fc_frame_free(fp);
+skip_resp:
fc_exch_release(ep);
return;
rel:
@@ -1899,10 +1905,16 @@ static void fc_exch_reset(struct fc_exch *ep)
fc_exch_hold(ep);
- if (!rc)
+ if (!rc) {
fc_exch_delete(ep);
+ } else {
+		FC_EXCH_DBG(ep, "ep is completed already, "
+				"hence skip calling the resp\n");
+ goto skip_resp;
+ }
fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+skip_resp:
fc_seq_set_resp(sp, NULL, ep->arg);
fc_exch_release(ep);
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 6e4bf05c6d77..63a4f48bdc75 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -37,6 +37,7 @@
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -113,6 +114,10 @@ unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
+int host_tagset_enable = 1;
+module_param(host_tagset_enable, int, 0444);
+MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("[email protected]");
@@ -3119,6 +3124,19 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
return 0;
}
+static int megasas_map_queues(struct Scsi_Host *shost)
+{
+ struct megasas_instance *instance;
+
+ instance = (struct megasas_instance *)shost->hostdata;
+
+ if (shost->nr_hw_queues == 1)
+ return 0;
+
+ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ instance->pdev, instance->low_latency_index_start);
+}
+
static void megasas_aen_polling(struct work_struct *work);
/**
@@ -3427,6 +3445,7 @@ static struct scsi_host_template megasas_template = {
.eh_timed_out = megasas_reset_timer,
.shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
+ .map_queues = megasas_map_queues,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
};
@@ -6808,6 +6827,26 @@ static int megasas_io_attach(struct megasas_instance *instance)
host->max_lun = MEGASAS_MAX_LUN;
host->max_cmd_len = 16;
+	/* Use the shared host tagset only for fusion adaptors when there are
+	 * managed interrupts (the SMP affinity enabled case). kdump runs with
+	 * a single MSI-X vector, so the shared host tagset is disabled there too.
+	 */
+
+ host->host_tagset = 0;
+ host->nr_hw_queues = 1;
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ (instance->msix_vectors > instance->low_latency_index_start) &&
+ host_tagset_enable &&
+ instance->smp_affinity_enable) {
+ host->host_tagset = 1;
+ host->nr_hw_queues = instance->msix_vectors -
+ instance->low_latency_index_start;
+ }
+
+ dev_info(&instance->pdev->dev,
+ "Max firmware commands: %d shared with nr_hw_queues = %d\n",
+ instance->max_fw_cmds, host->nr_hw_queues);
/*
* Notify the mid-layer about the new controller
*/
@@ -8205,11 +8244,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
goto out;
}
+ /* always store 64 bits regardless of addressing */
sense_ptr = (void *)cmd->frame + ioc->sense_off;
- if (instance->consistent_mask_64bit)
- put_unaligned_le64(sense_handle, sense_ptr);
- else
- put_unaligned_le32(sense_handle, sense_ptr);
+ put_unaligned_le64(sense_handle, sense_ptr);
}
/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index b0c01cf0428f..fd607287608e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -359,24 +359,29 @@ megasas_get_msix_index(struct megasas_instance *instance,
{
int sdev_busy;
- /* nr_hw_queue = 1 for MegaRAID */
- struct blk_mq_hw_ctx *hctx =
- scmd->device->request_queue->queue_hw_ctx[0];
-
- sdev_busy = atomic_read(&hctx->nr_active);
+	/* TBD - if the SCSI midlayer removes device_busy in the future, the
+	 * driver should track the counter in an internal structure.
+	 */
+ sdev_busy = atomic_read(&scmd->device->device_busy);
if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
- sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
+ sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) {
cmd->request_desc->SCSIIO.MSIxIndex =
mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
- else if (instance->msix_load_balance)
+ } else if (instance->msix_load_balance) {
cmd->request_desc->SCSIIO.MSIxIndex =
(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
instance->msix_vectors));
- else
+ } else if (instance->host->nr_hw_queues > 1) {
+ u32 tag = blk_mq_unique_tag(scmd->request);
+
+ cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
+ instance->low_latency_index_start;
+ } else {
cmd->request_desc->SCSIIO.MSIxIndex =
instance->reply_map[raw_smp_processor_id()];
+ }
}
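For the nr_hw_queues > 1 branch added above, blk_mq_unique_tag() encodes the submitting hardware-queue index in the upper bits of the tag, so the driver can recover which queue a request was mapped to and steer its reply to the matching MSI-X vector. A minimal sketch, with msix_base standing in for the driver's low_latency_index_start offset:

#include <linux/blk-mq.h>

/* Illustration only: pick a reply vector from the blk-mq hw queue of a request. */
static u16 example_reply_vector(struct request *rq, u16 msix_base)
{
	u32 tag = blk_mq_unique_tag(rq);

	/* the upper bits of the unique tag carry the hw queue index */
	return blk_mq_unique_tag_to_hwq(tag) + msix_base;
}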
/**
@@ -956,9 +961,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
if (megasas_alloc_cmdlist_fusion(instance))
goto fail_exit;
- dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
- instance->max_fw_cmds);
-
/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
@@ -1102,8 +1104,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
instance->perf_mode = MR_BALANCED_PERF_MODE;
- dev_info(&instance->pdev->dev, "Performance mode :%s\n",
- MEGASAS_PERF_MODE_2STR(instance->perf_mode));
+ dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n",
+ MEGASAS_PERF_MODE_2STR(instance->perf_mode),
+ instance->low_latency_index_start);
instance->fw_sync_cache_support = (scratch_pad_1 &
MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
index 86209455172d..c299f7e078fb 100644
--- a/drivers/scsi/mpt3sas/Kconfig
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -79,5 +79,5 @@ config SCSI_MPT2SAS
select SCSI_MPT3SAS
depends on PCI && SCSI
help
- Dummy config option for backwards compatiblity: configure the MPT3SAS
+ Dummy config option for backwards compatibility: configure the MPT3SAS
driver instead.
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 969baf4cd3f5..6e23dc3209fe 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5034,7 +5034,7 @@ _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc)
static void
_base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
- u16 trigger_flags;
+ int trigger_flags;
/*
* Default setting of master trigger.
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index f5fc7f518f8a..47ad64b06623 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2245,7 +2245,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
chap_name);
break;
case ISCSI_BOOT_TGT_CHAP_SECRET:
- rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
chap_secret);
break;
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
@@ -2253,7 +2253,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
mchap_name);
break;
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
- rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
mchap_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 24c0f7ec0351..4a08c450b756 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -6740,7 +6740,7 @@ static int __init scsi_debug_init(void)
k = sdeb_zbc_model_str(sdeb_zbc_model_s);
if (k < 0) {
ret = k;
- goto free_vm;
+ goto free_q_arr;
}
sdeb_zbc_model = k;
switch (sdeb_zbc_model) {
@@ -6753,7 +6753,8 @@ static int __init scsi_debug_init(void)
break;
default:
pr_err("Invalid ZBC model\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_q_arr;
}
}
if (sdeb_zbc_model != BLK_ZONED_NONE) {
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4848ae3c7b56..b3f14f05340a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -249,7 +249,8 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
req = blk_get_request(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+ rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
if (IS_ERR(req))
return ret;
rq = scsi_req(req);
@@ -1206,6 +1207,8 @@ static blk_status_t
scsi_device_state_check(struct scsi_device *sdev, struct request *req)
{
switch (sdev->sdev_state) {
+ case SDEV_CREATED:
+ return BLK_STS_OK;
case SDEV_OFFLINE:
case SDEV_TRANSPORT_OFFLINE:
/*
@@ -1232,18 +1235,18 @@ scsi_device_state_check(struct scsi_device *sdev, struct request *req)
return BLK_STS_RESOURCE;
case SDEV_QUIESCE:
/*
- * If the devices is blocked we defer normal commands.
+ * If the device is blocked we only accept power management
+ * commands.
*/
- if (req && !(req->rq_flags & RQF_PREEMPT))
+ if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
return BLK_STS_RESOURCE;
return BLK_STS_OK;
default:
/*
* For any other not fully online state we only allow
- * special commands. In particular any user initiated
- * command is not allowed.
+ * power management commands.
*/
- if (req && !(req->rq_flags & RQF_PREEMPT))
+ if (req && !(req->rq_flags & RQF_PM))
return BLK_STS_IOERR;
return BLK_STS_OK;
}
@@ -2516,15 +2519,13 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
/**
- * scsi_device_quiesce - Block user issued commands.
+ * scsi_device_quiesce - Block all commands except power management.
* @sdev: scsi device to quiesce.
*
* This works by trying to transition to the SDEV_QUIESCE state
* (which must be a legal transition). When the device is in this
- * state, only special requests will be accepted, all others will
- * be deferred. Since special requests may also be requeued requests,
- * a successful return doesn't guarantee the device will be
- * totally quiescent.
+ * state, only power management requests will be accepted, all others will
+ * be deferred.
*
* Must be called with user context, may sleep.
*
@@ -2586,12 +2587,12 @@ void scsi_device_resume(struct scsi_device *sdev)
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
+ if (sdev->sdev_state == SDEV_QUIESCE)
+ scsi_device_set_state(sdev, SDEV_RUNNING);
if (sdev->quiesced_by) {
sdev->quiesced_by = NULL;
blk_clear_pm_only(sdev->request_queue);
}
- if (sdev->sdev_state == SDEV_QUIESCE)
- scsi_device_set_state(sdev, SDEV_RUNNING);
mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index f3d5b1bbd5aa..c37dd15d16d2 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -117,12 +117,16 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
sshdr = &sshdr_tmp;
for(i = 0; i < DV_RETRIES; i++) {
+ /*
+ * The purpose of the RQF_PM flag below is to bypass the
+ * SDEV_QUIESCE state.
+ */
result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense,
sshdr, DV_TIMEOUT, /* retries */ 1,
REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER,
- 0, NULL);
+ RQF_PM, NULL);
if (driver_byte(result) != DRIVER_SENSE ||
sshdr->sense_key != UNIT_ATTENTION)
break;
@@ -1005,23 +1009,26 @@ spi_dv_device(struct scsi_device *sdev)
*/
lock_system_sleep();
+ if (scsi_autopm_get_device(sdev))
+ goto unlock_system_sleep;
+
if (unlikely(spi_dv_in_progress(starget)))
- goto unlock;
+ goto put_autopm;
if (unlikely(scsi_device_get(sdev)))
- goto unlock;
+ goto put_autopm;
spi_dv_in_progress(starget) = 1;
buffer = kzalloc(len, GFP_KERNEL);
if (unlikely(!buffer))
- goto out_put;
+ goto put_sdev;
/* We need to verify that the actual device will quiesce; the
* later target quiesce is just a nice to have */
if (unlikely(scsi_device_quiesce(sdev)))
- goto out_free;
+ goto free_buffer;
scsi_target_quiesce(starget);
@@ -1041,12 +1048,16 @@ spi_dv_device(struct scsi_device *sdev)
spi_initial_dv(starget) = 1;
- out_free:
+free_buffer:
kfree(buffer);
- out_put:
+
+put_sdev:
spi_dv_in_progress(starget) = 0;
scsi_device_put(sdev);
-unlock:
+put_autopm:
+ scsi_autopm_put_device(sdev);
+
+unlock_system_sleep:
unlock_system_sleep();
}
EXPORT_SYMBOL(spi_dv_device);
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index cba1cf6a1c12..1e939a2a387f 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport)
res = mutex_lock_interruptible(&rport->mutex);
if (res)
goto out;
- scsi_target_block(&shost->shost_gendev);
+ if (rport->state != SRP_RPORT_FAIL_FAST)
+ /*
+ * sdev state must be SDEV_TRANSPORT_OFFLINE; a transition
+ * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
+ * later is OK though, since scsi_internal_device_unblock_nowait()
+ * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
+ */
+ scsi_target_block(&shost->shost_gendev);
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 679c2c025047..a3d2d4bc4a3d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -984,8 +984,10 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
}
}
- if (sdp->no_write_same)
+ if (sdp->no_write_same) {
+ rq->rq_flags |= RQF_QUIET;
return BLK_STS_TARGET;
+ }
if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
return sd_setup_write_same16_cmnd(cmd, false);
@@ -3510,10 +3512,8 @@ static int sd_probe(struct device *dev)
static int sd_remove(struct device *dev)
{
struct scsi_disk *sdkp;
- dev_t devt;
sdkp = dev_get_drvdata(dev);
- devt = disk_devt(sdkp->disk);
scsi_autopm_get_device(sdkp->device);
async_synchronize_full_domain(&scsi_sd_pm_domain);
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 3f6dfed4fe84..b915b38c2b27 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -72,6 +72,7 @@ config SCSI_UFS_DWC_TC_PCI
config SCSI_UFSHCD_PLATFORM
tristate "Platform bus based UFS Controller support"
depends on SCSI_UFSHCD
+ depends on HAS_IOMEM
help
This selects the UFS host controller support. Select this if
you have an UFS controller on Platform bus.
diff --git a/drivers/scsi/ufs/ufs-mediatek-trace.h b/drivers/scsi/ufs/ufs-mediatek-trace.h
index fd6f84c1b4e2..895e82ea6ece 100644
--- a/drivers/scsi/ufs/ufs-mediatek-trace.h
+++ b/drivers/scsi/ufs/ufs-mediatek-trace.h
@@ -31,6 +31,6 @@ TRACE_EVENT(ufs_mtk_event,
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/scsi/ufs/
#define TRACE_INCLUDE_FILE ufs-mediatek-trace
#include <trace/define_trace.h>
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 3522458db3bb..80618af7c872 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -70,6 +70,13 @@ static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}
+static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
+}
+
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -514,6 +521,9 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
host->caps |= UFS_MTK_CAP_DISABLE_AH8;
+ if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
+ host->caps |= UFS_MTK_CAP_BROKEN_VCC;
+
dev_info(hba->dev, "caps: 0x%x", host->caps);
}
@@ -1003,6 +1013,17 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
+
+ if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
+ (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
+ hba->vreg_info.vcc->always_on = true;
+ /*
+ * VCC will be kept always-on, so we don't
+ * need any delay during regulator operations.
+ */
+ hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
+ UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
+ }
}
static void ufs_mtk_event_notify(struct ufs_hba *hba,
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
index 93d35097dfb0..3f0d3bb769e8 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -81,6 +81,7 @@ enum ufs_mtk_host_caps {
UFS_MTK_CAP_BOOST_CRYPT_ENGINE = 1 << 0,
UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1,
UFS_MTK_CAP_DISABLE_AH8 = 1 << 2,
+ UFS_MTK_CAP_BROKEN_VCC = 1 << 3,
};
struct ufs_mtk_crypt_cfg {
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index d593edb48767..14dfda735adf 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -330,7 +330,6 @@ enum {
UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
};
-#define POWER_DESC_MAX_SIZE 0x62
#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
/* Attribute bActiveICCLevel parameter bit masks definitions */
@@ -513,6 +512,7 @@ struct ufs_query_res {
struct ufs_vreg {
struct regulator *reg;
const char *name;
+ bool always_on;
bool enabled;
int min_uV;
int max_uV;
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index df3a564c3e33..fadd566025b8 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -148,6 +148,8 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
{
struct intel_host *host;
+ hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
@@ -163,6 +165,41 @@ static void ufs_intel_common_exit(struct ufs_hba *hba)
intel_ltr_hide(hba->dev);
}
+static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
+{
+ /*
+ * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
+ * address registers must be restored because the restore kernel may
+ * have used different addresses.
+ */
+ ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+ ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_H);
+
+ if (ufshcd_is_link_hibern8(hba)) {
+ int ret = ufshcd_uic_hibern8_exit(hba);
+
+ if (!ret) {
+ ufshcd_set_link_active(hba);
+ } else {
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+ /*
+ * Force reset and restore. Any other actions can lead
+ * to an unrecoverable state.
+ */
+ ufshcd_set_link_off(hba);
+ }
+ }
+
+ return 0;
+}
+
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
@@ -174,6 +211,7 @@ static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.init = ufs_intel_common_init,
.exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
};
static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
@@ -181,6 +219,7 @@ static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
.init = ufs_intel_ehl_init,
.exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
};
#ifdef CONFIG_PM_SLEEP
@@ -207,6 +246,30 @@ static int ufshcd_pci_resume(struct device *dev)
{
return ufshcd_system_resume(dev_get_drvdata(dev));
}
+
+/**
+ * ufshcd_pci_poweroff - suspend-to-disk poweroff function
+ * @dev: pointer to PCI device handle
+ *
+ * Returns 0 if successful
+ * Returns non-zero otherwise
+ */
+static int ufshcd_pci_poweroff(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int spm_lvl = hba->spm_lvl;
+ int ret;
+
+ /*
+ * For poweroff we need to set the UFS device to PowerDown mode.
+ * Force spm_lvl to ensure that.
+ */
+ hba->spm_lvl = 5;
+ ret = ufshcd_system_suspend(hba);
+ hba->spm_lvl = spm_lvl;
+ return ret;
+}
+
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
@@ -302,8 +365,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend,
- ufshcd_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ufshcd_pci_suspend,
+ .resume = ufshcd_pci_resume,
+ .freeze = ufshcd_pci_suspend,
+ .thaw = ufshcd_pci_resume,
+ .poweroff = ufshcd_pci_poweroff,
+ .restore = ufshcd_pci_resume,
+#endif
SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
ufshcd_pci_runtime_resume,
ufshcd_pci_runtime_idle)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9902b7e3aa4a..fb32d122f2e3 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -225,6 +225,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_clear_ua_wluns(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
@@ -288,7 +289,8 @@ static inline void ufshcd_wb_config(struct ufs_hba *hba)
if (ret)
dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
__func__, ret);
- ufshcd_wb_toggle_flush(hba, true);
+ if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
+ ufshcd_wb_toggle_flush(hba, true);
}
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
@@ -580,6 +582,23 @@ static void ufshcd_print_pwr_info(struct ufs_hba *hba)
hba->pwr_info.hs_rate);
}
+static void ufshcd_device_reset(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_vops_device_reset(hba);
+
+ if (!err) {
+ ufshcd_set_ufs_dev_active(hba);
+ if (ufshcd_is_wb_allowed(hba)) {
+ hba->wb_enabled = false;
+ hba->wb_buf_flush_enabled = false;
+ }
+ }
+ if (err != -EOPNOTSUPP)
+ ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
+}
+
void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
if (!us)
@@ -3665,7 +3684,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
- "dme-reset: error code %d\n", ret);
+ "dme-enable: error code %d\n", ret);
return ret;
}
@@ -3964,7 +3983,7 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Reset the attached device */
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
ret = ufshcd_host_reset_and_restore(hba);
@@ -3977,6 +3996,8 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
if (ret)
dev_err(hba->dev, "%s: link recovery failed, err %d",
__func__, ret);
+ else
+ ufshcd_clear_ua_wluns(hba);
return ret;
}
@@ -4973,7 +4994,8 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
break;
} /* end of switch */
- if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
+ if ((host_byte(result) != DID_OK) &&
+ (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
return result;
}
@@ -5418,9 +5440,6 @@ static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
{
- if (hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)
- return;
-
if (enable)
ufshcd_wb_buf_flush_enable(hba);
else
@@ -5985,6 +6004,9 @@ skip_err_handling:
ufshcd_scsi_unblock_requests(hba);
ufshcd_err_handling_unprepare(hba);
up(&hba->eh_sem);
+
+ if (!err && needs_reset)
+ ufshcd_clear_ua_wluns(hba);
}
/**
@@ -6279,9 +6301,13 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
}
- if (enabled_intr_status && retval == IRQ_NONE) {
- dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
- __func__, intr_status);
+ if (enabled_intr_status && retval == IRQ_NONE &&
+ !ufshcd_eh_in_progress(hba)) {
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
+ __func__,
+ intr_status,
+ hba->ufs_stats.last_intr_status,
+ enabled_intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
@@ -6325,7 +6351,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by %TM_CMD_TIMEOUT.
*/
- req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
req->end_io_data = &wait;
free_slot = req->tag;
WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
@@ -6643,19 +6672,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host;
struct ufs_hba *hba;
- unsigned int tag;
u32 pos;
int err;
- u8 resp = 0xF;
- struct ufshcd_lrb *lrbp;
+ u8 resp = 0xF, lun;
unsigned long flags;
host = cmd->device->host;
hba = shost_priv(host);
- tag = cmd->request->tag;
- lrbp = &hba->lrb[tag];
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+ lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
+ err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
if (!err)
err = resp;
@@ -6664,7 +6690,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
/* clear the commands that were pending for corresponding LUN */
for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
- if (hba->lrb[pos].lun == lrbp->lun) {
+ if (hba->lrb[pos].lun == lun) {
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
@@ -6925,13 +6951,11 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
ufshcd_set_clk_freq(hba, true);
err = ufshcd_hba_enable(hba);
- if (err)
- goto out;
/* Establish the link again and restore the device */
- err = ufshcd_probe_hba(hba, false);
+ if (!err)
+ err = ufshcd_probe_hba(hba, false);
-out:
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
@@ -6968,7 +6992,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
do {
/* Reset the attached device */
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
@@ -7702,6 +7726,8 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
if (ret)
goto out;
+ ufshcd_clear_ua_wluns(hba);
+
/* Initialize devfreq after UFS device is detected */
if (ufshcd_is_clkscaling_supported(hba)) {
memcpy(&hba->clk_scaling.saved_pwr_info.info,
@@ -7903,8 +7929,6 @@ out:
pm_runtime_put_sync(hba->dev);
ufshcd_exit_clk_scaling(hba);
ufshcd_hba_exit(hba);
- } else {
- ufshcd_clear_ua_wluns(hba);
}
}
@@ -8045,7 +8069,7 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
- if (!vreg || !vreg->enabled)
+ if (!vreg || !vreg->enabled || vreg->always_on)
goto out;
ret = regulator_disable(vreg->reg);
@@ -8414,13 +8438,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* handling context.
*/
hba->host->eh_noresume = 1;
- if (hba->wlun_dev_clr_ua) {
- ret = ufshcd_send_request_sense(hba, sdp);
- if (ret)
- goto out;
- /* Unit attention condition is cleared now */
- hba->wlun_dev_clr_ua = false;
- }
+ ufshcd_clear_ua_wluns(hba);
cmd[4] = pwr_mode << 4;
@@ -8441,7 +8459,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
if (!ret)
hba->curr_dev_pwr_mode = pwr_mode;
-out:
+
scsi_device_put(sdp);
hba->host->eh_noresume = 0;
return ret;
@@ -8685,6 +8703,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_wb_need_flush(hba));
}
+ flush_work(&hba->eeh_work);
+
if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
if (!ufshcd_is_runtime_pm(pm_op))
/* ensure that bkops is disabled */
@@ -8697,8 +8717,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
}
}
- flush_work(&hba->eeh_work);
-
/*
* In the case of DeepSleep, the device is expected to remain powered
* with the link off, so do not check for bkops.
@@ -8747,7 +8765,7 @@ set_link_active:
* further below.
*/
if (ufshcd_is_ufs_dev_deepsleep(hba)) {
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
WARN_ON(!ufshcd_is_link_off(hba));
}
if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
@@ -8757,7 +8775,7 @@ set_link_active:
set_dev_active:
/* Can also get here needing to exit DeepSleep */
if (ufshcd_is_ufs_dev_deepsleep(hba)) {
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
ufshcd_host_reset_and_restore(hba);
}
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
@@ -8767,6 +8785,7 @@ enable_gating:
ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
hba->dev_info.b_rpm_dev_flush_capable = false;
+ ufshcd_clear_ua_wluns(hba);
ufshcd_release(hba);
out:
if (hba->dev_info.b_rpm_dev_flush_capable) {
@@ -8877,6 +8896,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
}
+ ufshcd_clear_ua_wluns(hba);
+
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
@@ -8925,7 +8946,8 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
hba->curr_dev_pwr_mode) &&
(ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
- hba->uic_link_state))
+ hba->uic_link_state) &&
+ !hba->dev_info.b_rpm_dev_flush_capable)
goto out;
if (pm_runtime_suspended(hba->dev)) {
@@ -9353,7 +9375,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
}
/* Reset the attached device */
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
ufshcd_init_crypto(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index f8c2467dc014..aa9ea3552323 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -1218,16 +1218,12 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
hba->vops->dbg_register_dump(hba);
}
-static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
+static inline int ufshcd_vops_device_reset(struct ufs_hba *hba)
{
- if (hba->vops && hba->vops->device_reset) {
- int err = hba->vops->device_reset(hba);
-
- if (!err)
- ufshcd_set_ufs_dev_active(hba);
- if (err != -EOPNOTSUPP)
- ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
- }
+ if (hba->vops && hba->vops->device_reset)
+ return hba->vops->device_reset(hba);
+
+ return -EOPNOTSUPP;
}
static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index f8e070d67fa3..a14684ffe4c1 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -214,7 +214,7 @@ int __init register_intc_controller(struct intc_desc *desc)
d->window[k].phys = res->start;
d->window[k].size = resource_size(res);
d->window[k].virt = ioremap(res->start,
- resource_size(res));
+ resource_size(res));
if (!d->window[k].virt)
goto err2;
}
diff --git a/drivers/sh/intc/virq-debugfs.c b/drivers/sh/intc/virq-debugfs.c
index 9e62ba9311f0..939915a07d99 100644
--- a/drivers/sh/intc/virq-debugfs.c
+++ b/drivers/sh/intc/virq-debugfs.c
@@ -16,7 +16,7 @@
#include <linux/debugfs.h>
#include "internals.h"
-static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
+static int intc_irq_xlate_show(struct seq_file *m, void *priv)
{
int i;
@@ -37,17 +37,7 @@ static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
return 0;
}
-static int intc_irq_xlate_open(struct inode *inode, struct file *file)
-{
- return single_open(file, intc_irq_xlate_debug, inode->i_private);
-}
-
-static const struct file_operations intc_irq_xlate_fops = {
- .open = intc_irq_xlate_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(intc_irq_xlate);
static int __init intc_irq_xlate_init(void)
{
diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c
index 1217cafdfd4d..9b0766384570 100644
--- a/drivers/soc/litex/litex_soc_ctrl.c
+++ b/drivers/soc/litex/litex_soc_ctrl.c
@@ -140,12 +140,13 @@ struct litex_soc_ctrl_device {
void __iomem *base;
};
+#ifdef CONFIG_OF
static const struct of_device_id litex_soc_ctrl_of_match[] = {
{.compatible = "litex,soc-controller"},
{},
};
-
MODULE_DEVICE_TABLE(of, litex_soc_ctrl_of_match);
+#endif /* CONFIG_OF */
static int litex_soc_ctrl_probe(struct platform_device *pdev)
{
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 809bfff3690a..cbc4c28c1541 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -189,24 +189,26 @@ static int altera_spi_txrx(struct spi_master *master,
/* send the first byte */
altera_spi_tx_word(hw);
- } else {
- while (hw->count < hw->len) {
- altera_spi_tx_word(hw);
- for (;;) {
- altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
- if (val & ALTERA_SPI_STATUS_RRDY_MSK)
- break;
+ return 1;
+ }
+
+ while (hw->count < hw->len) {
+ altera_spi_tx_word(hw);
- cpu_relax();
- }
+ for (;;) {
+ altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
+ if (val & ALTERA_SPI_STATUS_RRDY_MSK)
+ break;
- altera_spi_rx_word(hw);
+ cpu_relax();
}
- spi_finalize_current_transfer(master);
+
+ altera_spi_rx_word(hw);
}
+ spi_finalize_current_transfer(master);
- return t->len;
+ return 0;
}
static irqreturn_t altera_spi_irq(int irq, void *dev)
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 70467b9d61ba..a3afd1b9ac56 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -115,6 +115,7 @@ struct cdns_spi {
void __iomem *regs;
struct clk *ref_clk;
struct clk *pclk;
+ unsigned int clk_rate;
u32 speed_hz;
const u8 *txbuf;
u8 *rxbuf;
@@ -250,7 +251,7 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi,
u32 ctrl_reg, baud_rate_val;
unsigned long frequency;
- frequency = clk_get_rate(xspi->ref_clk);
+ frequency = xspi->clk_rate;
ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
@@ -558,8 +559,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
master->auto_runtime_pm = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ xspi->clk_rate = clk_get_rate(xspi->ref_clk);
/* Set to default valid value */
- master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4;
+ master->max_speed_hz = xspi->clk_rate / 4;
xspi->speed_hz = master->max_speed_hz;
master->bits_per_word_mask = SPI_BPW_MASK(8);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 9494257e1c33..6d8e0a05a535 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -115,14 +115,13 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct fsl_spi_platform_data *pdata;
- bool pol = spi->mode & SPI_CS_HIGH;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
pdata = spi->dev.parent->parent->platform_data;
if (value == BITBANG_CS_INACTIVE) {
if (pdata->cs_control)
- pdata->cs_control(spi, !pol);
+ pdata->cs_control(spi, false);
}
if (value == BITBANG_CS_ACTIVE) {
@@ -134,7 +133,7 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
fsl_spi_change_mode(spi);
if (pdata->cs_control)
- pdata->cs_control(spi, pol);
+ pdata->cs_control(spi, true);
}
}
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 512e925d5ea4..881f645661cc 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -83,6 +83,7 @@ struct spi_geni_master {
spinlock_t lock;
int irq;
bool cs_flag;
+ bool abort_failed;
};
static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -141,8 +142,49 @@ static void handle_fifo_timeout(struct spi_master *spi,
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
- if (!time_left)
+ if (!time_left) {
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
+
+ /*
+ * No need for a lock since SPI core has a lock and we never
+ * access this from an interrupt.
+ */
+ mas->abort_failed = true;
+ }
+}
+
+static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ u32 m_irq, m_irq_en;
+
+ if (!mas->abort_failed)
+ return false;
+
+ /*
+ * The only known case where a transfer times out, then a cancel
+ * times out, and then an abort times out is when something is
+ * blocking our interrupt handler from running. Avoid starting any
+ * new transfers until that sorts itself out.
+ */
+ spin_lock_irq(&mas->lock);
+ m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+ m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
+ spin_unlock_irq(&mas->lock);
+
+ if (m_irq & m_irq_en) {
+ dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
+ m_irq & m_irq_en);
+ return true;
+ }
+
+ /*
+ * If we're here, the problem resolved itself, so there is no need to
+ * check again on future transfers.
+ */
+ mas->abort_failed = false;
+
+ return false;
}
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
@@ -158,10 +200,21 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
if (set_flag == mas->cs_flag)
return;
- mas->cs_flag = set_flag;
-
pm_runtime_get_sync(mas->dev);
+
+ if (spi_geni_is_abort_still_pending(mas)) {
+ dev_err(mas->dev, "Can't set chip select\n");
+ goto exit;
+ }
+
spin_lock_irq(&mas->lock);
+ if (mas->cur_xfer) {
+ dev_err(mas->dev, "Can't set CS when prev xfer running\n");
+ spin_unlock_irq(&mas->lock);
+ goto exit;
+ }
+
+ mas->cs_flag = set_flag;
reinit_completion(&mas->cs_done);
if (set_flag)
geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
@@ -170,9 +223,12 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
- if (!time_left)
+ if (!time_left) {
+ dev_warn(mas->dev, "Timeout setting chip select\n");
handle_fifo_timeout(spi, NULL);
+ }
+exit:
pm_runtime_put(mas->dev);
}
@@ -280,6 +336,9 @@ static int spi_geni_prepare_message(struct spi_master *spi,
int ret;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -354,6 +413,12 @@ static bool geni_spi_handle_tx(struct spi_geni_master *mas)
unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
unsigned int i = 0;
+ /* Stop the watermark IRQ if nothing to send */
+ if (!mas->cur_xfer) {
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ return false;
+ }
+
max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
if (mas->tx_rem_bytes < max_bytes)
max_bytes = mas->tx_rem_bytes;
@@ -396,6 +461,14 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
if (rx_last_byte_valid && rx_last_byte_valid < 4)
rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
}
+
+ /* Clear out the FIFO and bail if nowhere to put it */
+ if (!mas->cur_xfer) {
+ for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
+ readl(se->base + SE_GENI_RX_FIFOn);
+ return;
+ }
+
if (mas->rx_rem_bytes < rx_bytes)
rx_bytes = mas->rx_rem_bytes;
@@ -495,6 +568,9 @@ static int spi_geni_transfer_one(struct spi_master *spi,
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+
/* Terminate and return success for 0 byte length transfer */
if (!xfer->len)
return 0;
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 471dedf3d339..6017209c6d2f 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -493,9 +493,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
/* align packet size with data registers access */
if (spi->cur_bpw > 8)
- fthlv -= (fthlv % 2); /* multiple of 2 */
+ fthlv += (fthlv % 2) ? 1 : 0;
else
- fthlv -= (fthlv % 4); /* multiple of 4 */
+ fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0;
if (!fthlv)
fthlv = 1;
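The spi-stm32 hunk above switches the FIFO threshold (fthlv) from rounding down to rounding up, so it stays a multiple of the data-register access width (2 bytes for >8-bit words, otherwise 4). A minimal standalone sketch of that round-up arithmetic follows; the function name and example values are illustrative, not taken from the driver.

#include <stdio.h>

/*
 * Round a FIFO threshold up to the register access width, mirroring
 * the arithmetic in the hunk above. Illustrative names only.
 */
static unsigned int align_fthlv(unsigned int fthlv, unsigned int bpw)
{
    unsigned int width = (bpw > 8) ? 2 : 4;

    if (fthlv % width)
        fthlv += width - (fthlv % width);
    if (!fthlv)
        fthlv = 1;
    return fthlv;
}

int main(void)
{
    /* 7 bytes with 8-bit words now rounds up to 8 instead of down to 4 */
    printf("%u\n", align_fthlv(7, 8));
    return 0;
}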
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 51d7c004fbab..720ab34784c1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1108,6 +1108,7 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
{
struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
+ u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
if (spi_controller_is_slave(ctlr)) {
@@ -1116,8 +1117,11 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
return -EINTR;
}
} else {
+ if (!speed_hz)
+ speed_hz = 100000;
+
ms = 8LL * 1000LL * xfer->len;
- do_div(ms, xfer->speed_hz);
+ do_div(ms, speed_hz);
ms += ms + 200; /* some tolerance */
if (ms > UINT_MAX)
@@ -3378,8 +3382,9 @@ int spi_setup(struct spi_device *spi)
if (status)
return status;
- if (!spi->max_speed_hz ||
- spi->max_speed_hz > spi->controller->max_speed_hz)
+ if (spi->controller->max_speed_hz &&
+ (!spi->max_speed_hz ||
+ spi->max_speed_hz > spi->controller->max_speed_hz))
spi->max_speed_hz = spi->controller->max_speed_hz;
mutex_lock(&spi->controller->io_mutex);
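The spi_transfer_wait hunk above estimates a wait budget from the transfer length and clock, and now falls back to 100 kHz when the transfer's speed_hz is zero so the division cannot fault. A minimal standalone sketch of that arithmetic; names and the example transfer are illustrative, not from the core.

#include <stdio.h>

/*
 * Rough transfer-timeout estimate mirroring the spi_transfer_wait hunk
 * above: 8 bits per byte, scaled to milliseconds, with a fallback clock
 * when none is given, then doubled plus 200 ms of tolerance.
 */
static unsigned long long xfer_timeout_ms(unsigned int len, unsigned int speed_hz)
{
    unsigned long long ms;

    if (!speed_hz)
        speed_hz = 100000;   /* assume a slow 100 kHz bus */

    ms = 8ULL * 1000ULL * len;
    ms /= speed_hz;
    ms += ms + 200;          /* some tolerance */

    return ms;
}

int main(void)
{
    /* 4096 bytes at 1 MHz: ~32 ms of transfer time, 264 ms budget */
    printf("%llu\n", xfer_timeout_ms(4096, 1000000));
    return 0;
}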
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index d99231c737fb..80d74cce2a01 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2987,7 +2987,9 @@ static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32,
v32.chanlist_len = cmd->chanlist_len;
v32.data = ptr_to_compat(cmd->data);
v32.data_len = cmd->data_len;
- return copy_to_user(cmd32, &v32, sizeof(v32));
+ if (copy_to_user(cmd32, &v32, sizeof(v32)))
+ return -EFAULT;
+ return 0;
}
/* Handle 32-bit COMEDI_CMD ioctl. */
diff --git a/drivers/staging/hikey9xx/hisi-spmi-controller.c b/drivers/staging/hikey9xx/hisi-spmi-controller.c
index 861aedd5de48..0d42bc65f39b 100644
--- a/drivers/staging/hikey9xx/hisi-spmi-controller.c
+++ b/drivers/staging/hikey9xx/hisi-spmi-controller.c
@@ -278,21 +278,24 @@ static int spmi_controller_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iores) {
dev_err(&pdev->dev, "can not get resource!\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_controller;
}
spmi_controller->base = devm_ioremap(&pdev->dev, iores->start,
resource_size(iores));
if (!spmi_controller->base) {
dev_err(&pdev->dev, "can not remap base addr!\n");
- return -EADDRNOTAVAIL;
+ ret = -EADDRNOTAVAIL;
+ goto err_put_controller;
}
ret = of_property_read_u32(pdev->dev.of_node, "spmi-channel",
&spmi_controller->channel);
if (ret) {
dev_err(&pdev->dev, "can not get channel\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put_controller;
}
platform_set_drvdata(pdev, spmi_controller);
@@ -309,9 +312,15 @@ static int spmi_controller_probe(struct platform_device *pdev)
ctrl->write_cmd = spmi_write_cmd;
ret = spmi_controller_add(ctrl);
- if (ret)
- dev_err(&pdev->dev, "spmi_add_controller failed with error %d!\n", ret);
+ if (ret) {
+ dev_err(&pdev->dev, "spmi_controller_add failed with error %d!\n", ret);
+ goto err_put_controller;
+ }
+
+ return 0;
+err_put_controller:
+ spmi_controller_put(ctrl);
return ret;
}
@@ -320,7 +329,7 @@ static int spmi_del_controller(struct platform_device *pdev)
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
spmi_controller_remove(ctrl);
- kfree(ctrl);
+ spmi_controller_put(ctrl);
return 0;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 52b9fb18c87f..b666cb23e5ca 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -1062,26 +1062,6 @@ static const struct v4l2_ctrl_config ctrl_select_isp_version = {
.def = 0,
};
-#if 0 /* #ifdef CONFIG_ION */
-/*
- * Control for ISP ion device fd
- *
- * userspace will open ion device and pass the fd to kernel.
- * this fd will be used to map shared fd to buffer.
- */
-/* V4L2_CID_ATOMISP_ION_DEVICE_FD is not defined */
-static const struct v4l2_ctrl_config ctrl_ion_dev_fd = {
- .ops = &ctrl_ops,
- .id = V4L2_CID_ATOMISP_ION_DEVICE_FD,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Ion Device Fd",
- .min = -1,
- .max = 1024,
- .step = 1,
- .def = ION_FD_UNSET
-};
-#endif
-
static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
struct atomisp_video_pipe *pipe, enum v4l2_buf_type buf_type)
{
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index d241349214e7..bc4bb4374313 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -712,7 +712,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
ret = dma_async_device_register(dd);
if (ret) {
dev_err(&pdev->dev, "failed to register dma device\n");
- return ret;
+ goto err_uninit_hsdma;
}
ret = of_dma_controller_register(pdev->dev.of_node,
@@ -728,6 +728,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
err_unregister:
dma_async_device_unregister(dd);
+err_uninit_hsdma:
+ mtk_hsdma_uninit(hsdma);
return ret;
}
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 6b171fff007b..a5991df23581 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -562,8 +562,6 @@ tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
- if (tcmu_cmd->se_cmd)
- tcmu_cmd->se_cmd->priv = NULL;
kfree(tcmu_cmd->dbi);
kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}
@@ -1174,11 +1172,12 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
mutex_lock(&udev->cmdr_lock);
- se_cmd->priv = tcmu_cmd;
if (!(se_cmd->transport_state & CMD_T_ABORTED))
ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
if (ret < 0)
tcmu_free_cmd(tcmu_cmd);
+ else
+ se_cmd->priv = tcmu_cmd;
mutex_unlock(&udev->cmdr_lock);
return scsi_ret;
}
@@ -1241,6 +1240,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
list_del_init(&cmd->queue_entry);
tcmu_free_cmd(cmd);
+ se_cmd->priv = NULL;
target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
unqueued = true;
}
@@ -1332,6 +1332,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
}
done:
+ se_cmd->priv = NULL;
if (read_len_valid) {
pr_debug("read_len = %d\n", read_len);
target_complete_cmd_with_length(cmd->se_cmd,
@@ -1478,6 +1479,7 @@ static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
se_cmd = cmd->se_cmd;
tcmu_free_cmd(cmd);
+ se_cmd->priv = NULL;
target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}
@@ -1592,6 +1594,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
* removed then LIO core will do the right thing and
* fail the retry.
*/
+ tcmu_cmd->se_cmd->priv = NULL;
target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
tcmu_free_cmd(tcmu_cmd);
continue;
@@ -1605,6 +1608,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
* Ignore scsi_ret for now. target_complete_cmd
* drops it.
*/
+ tcmu_cmd->se_cmd->priv = NULL;
target_complete_cmd(tcmu_cmd->se_cmd,
SAM_STAT_CHECK_CONDITION);
tcmu_free_cmd(tcmu_cmd);
@@ -2212,6 +2216,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
WARN_ON(!cmd->se_cmd);
list_del_init(&cmd->queue_entry);
+ cmd->se_cmd->priv = NULL;
if (err_level == 1) {
/*
* Userspace was not able to start the
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 44e15d7fb2f0..66d6f1d06f21 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -46,60 +46,83 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
return 0;
}
-struct xcopy_dev_search_info {
- const unsigned char *dev_wwn;
- struct se_device *found_dev;
-};
-
+/**
+ * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
+ *
+ * @se_dev: device being considered for match
+ * @dev_wwn: XCOPY requested NAA dev_wwn
+ * @return: 1 on match, 0 on no-match
+ */
static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
- void *data)
+ const unsigned char *dev_wwn)
{
- struct xcopy_dev_search_info *info = data;
unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
int rc;
- if (!se_dev->dev_attrib.emulate_3pc)
+ if (!se_dev->dev_attrib.emulate_3pc) {
+ pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev);
return 0;
+ }
memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
- rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
- if (rc != 0)
- return 0;
-
- info->found_dev = se_dev;
- pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
-
- rc = target_depend_item(&se_dev->dev_group.cg_item);
+ rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
if (rc != 0) {
- pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
- rc, se_dev);
- return rc;
+ pr_debug("XCOPY: skip non-matching: %*ph\n",
+ XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn);
+ return 0;
}
+ pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
- pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
- se_dev, &se_dev->dev_group);
return 1;
}
-static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
- struct se_device **found_dev)
+static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
+ const unsigned char *dev_wwn,
+ struct se_device **_found_dev,
+ struct percpu_ref **_found_lun_ref)
{
- struct xcopy_dev_search_info info;
- int ret;
-
- memset(&info, 0, sizeof(info));
- info.dev_wwn = dev_wwn;
-
- ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
- if (ret == 1) {
- *found_dev = info.found_dev;
- return 0;
- } else {
- pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
- return -EINVAL;
+ struct se_dev_entry *deve;
+ struct se_node_acl *nacl;
+ struct se_lun *this_lun = NULL;
+ struct se_device *found_dev = NULL;
+
+ /* cmd with NULL sess indicates no associated $FABRIC_MOD */
+ if (!sess)
+ goto err_out;
+
+ pr_debug("XCOPY 0xe4: searching for: %*ph\n",
+ XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn);
+
+ nacl = sess->se_node_acl;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+ struct se_device *this_dev;
+ int rc;
+
+ this_lun = rcu_dereference(deve->se_lun);
+ this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
+
+ rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
+ if (rc) {
+ if (percpu_ref_tryget_live(&this_lun->lun_ref))
+ found_dev = this_dev;
+ break;
+ }
}
+ rcu_read_unlock();
+ if (found_dev == NULL)
+ goto err_out;
+
+ pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n",
+ found_dev, &found_dev->dev_group);
+ *_found_dev = found_dev;
+ *_found_lun_ref = &this_lun->lun_ref;
+ return 0;
+err_out:
+ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ return -EINVAL;
}
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
@@ -246,12 +269,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
switch (xop->op_origin) {
case XCOL_SOURCE_RECV_OP:
- rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
- &xop->dst_dev);
+ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+ xop->dst_tid_wwn,
+ &xop->dst_dev,
+ &xop->remote_lun_ref);
break;
case XCOL_DEST_RECV_OP:
- rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
- &xop->src_dev);
+ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+ xop->src_tid_wwn,
+ &xop->src_dev,
+ &xop->remote_lun_ref);
break;
default:
pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
@@ -391,18 +418,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
- struct se_device *remote_dev;
-
if (xop->op_origin == XCOL_SOURCE_RECV_OP)
- remote_dev = xop->dst_dev;
+ pr_debug("putting dst lun_ref for %p\n", xop->dst_dev);
else
- remote_dev = xop->src_dev;
-
- pr_debug("Calling configfs_undepend_item for"
- " remote_dev: %p remote_dev->dev_group: %p\n",
- remote_dev, &remote_dev->dev_group.cg_item);
+ pr_debug("putting src lun_ref for %p\n", xop->src_dev);
- target_undepend_item(&remote_dev->dev_group.cg_item);
+ percpu_ref_put(xop->remote_lun_ref);
}
static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index c56a1bde9417..e5f20005179a 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -27,6 +27,7 @@ struct xcopy_op {
struct se_device *dst_dev;
unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ struct percpu_ref *remote_lun_ref;
sector_t src_lba;
sector_t dst_lba;
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 8b7f941a9bb7..b8c4159bc32d 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2316,7 +2316,7 @@ static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw,
if (auth && auth->reply.route_hi == sw->config.route_hi &&
auth->reply.route_lo == sw->config.route_lo) {
- tb_dbg(tb, "NVM_AUTH found for %llx flags 0x%#x status %#x\n",
+ tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n",
tb_route(sw), auth->reply.hdr.flags, auth->reply.status);
if (auth->reply.hdr.flags & ICM_FLAGS_ERROR)
ret = -EIO;
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 47a6e42f0d04..e15cd6b5bb99 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -401,6 +401,20 @@ config MIPS_EJTAG_FDC_KGDB_CHAN
help
FDC channel number to use for KGDB.
+config NULL_TTY
+ tristate "NULL TTY driver"
+ help
+ Say Y here if you want a NULL TTY which simply discards messages.
+
+ This is useful to allow userspace applications which expect a console
+ device to work without modifications even when no console is
+ available or desired.
+
+ In order to use this driver, you should redirect the console to this
+ TTY, or boot the kernel with console=ttynull.
+
+ If unsure, say N.
+
config TRACE_ROUTER
tristate "Trace data router for MIPI P1149.7 cJTAG standard"
depends on TRACE_SINK
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 3c1c5a9240a7..b3ccae932660 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_TTY) += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
tty_buffer.o tty_port.o tty_mutex.o \
tty_ldsem.o tty_baudrate.o tty_jobctrl.o \
- n_null.o ttynull.o
+ n_null.o
obj-$(CONFIG_LEGACY_PTYS) += pty.o
obj-$(CONFIG_UNIX98_PTYS) += pty.o
obj-$(CONFIG_AUDIT) += tty_audit.o
@@ -25,6 +25,7 @@ obj-$(CONFIG_ISI) += isicom.o
obj-$(CONFIG_MOXA_INTELLIO) += moxa.o
obj-$(CONFIG_MOXA_SMARTIO) += mxser.o
obj-$(CONFIG_NOZOMI) += nozomi.o
+obj-$(CONFIG_NULL_TTY) += ttynull.o
obj-$(CONFIG_ROCKETPORT) += rocket.o
obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 118b29912289..e0c00a1b0763 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -648,6 +648,14 @@ static void wait_for_xmitr(struct uart_port *port)
(val & STAT_TX_RDY(port)), 1, 10000);
}
+static void wait_for_xmite(struct uart_port *port)
+{
+ u32 val;
+
+ readl_poll_timeout_atomic(port->membase + UART_STAT, val,
+ (val & STAT_TX_EMP), 1, 10000);
+}
+
static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
{
wait_for_xmitr(port);
@@ -675,7 +683,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
uart_console_write(port, s, count, mvebu_uart_console_putchar);
- wait_for_xmitr(port);
+ wait_for_xmite(port);
if (ier)
writel(ier, port->membase + UART_CTRL(port));
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 1066eebe3b28..328d5a78792f 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -1000,6 +1000,7 @@ static int sifive_serial_probe(struct platform_device *pdev)
/* Set up clock divider */
ssp->clkin_rate = clk_get_rate(ssp->clk);
ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE;
+ ssp->port.uartclk = ssp->baud_rate * 16;
__ssp_update_div(ssp);
platform_set_drvdata(pdev, ssp);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8034489337d7..4a208a95e921 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -143,9 +143,8 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */
DEFINE_MUTEX(tty_mutex);
static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
-ssize_t redirected_tty_write(struct file *, const char __user *,
- size_t, loff_t *);
+static ssize_t tty_write(struct kiocb *, struct iov_iter *);
+ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *);
static __poll_t tty_poll(struct file *, poll_table *);
static int tty_open(struct inode *, struct file *);
long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
@@ -438,8 +437,7 @@ static ssize_t hung_up_tty_read(struct file *file, char __user *buf,
return 0;
}
-static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t hung_up_tty_write(struct kiocb *iocb, struct iov_iter *from)
{
return -EIO;
}
@@ -478,7 +476,8 @@ static void tty_show_fdinfo(struct seq_file *m, struct file *file)
static const struct file_operations tty_fops = {
.llseek = no_llseek,
.read = tty_read,
- .write = tty_write,
+ .write_iter = tty_write,
+ .splice_write = iter_file_splice_write,
.poll = tty_poll,
.unlocked_ioctl = tty_ioctl,
.compat_ioctl = tty_compat_ioctl,
@@ -491,7 +490,8 @@ static const struct file_operations tty_fops = {
static const struct file_operations console_fops = {
.llseek = no_llseek,
.read = tty_read,
- .write = redirected_tty_write,
+ .write_iter = redirected_tty_write,
+ .splice_write = iter_file_splice_write,
.poll = tty_poll,
.unlocked_ioctl = tty_ioctl,
.compat_ioctl = tty_compat_ioctl,
@@ -503,7 +503,7 @@ static const struct file_operations console_fops = {
static const struct file_operations hung_up_tty_fops = {
.llseek = no_llseek,
.read = hung_up_tty_read,
- .write = hung_up_tty_write,
+ .write_iter = hung_up_tty_write,
.poll = hung_up_tty_poll,
.unlocked_ioctl = hung_up_tty_ioctl,
.compat_ioctl = hung_up_tty_compat_ioctl,
@@ -606,9 +606,9 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
/* This breaks for file handles being sent over AF_UNIX sockets ? */
list_for_each_entry(priv, &tty->tty_files, list) {
filp = priv->file;
- if (filp->f_op->write == redirected_tty_write)
+ if (filp->f_op->write_iter == redirected_tty_write)
cons_filp = filp;
- if (filp->f_op->write != tty_write)
+ if (filp->f_op->write_iter != tty_write)
continue;
closecount++;
__tty_fasync(-1, filp, 0); /* can't block */
@@ -901,9 +901,9 @@ static inline ssize_t do_tty_write(
ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
struct tty_struct *tty,
struct file *file,
- const char __user *buf,
- size_t count)
+ struct iov_iter *from)
{
+ size_t count = iov_iter_count(from);
ssize_t ret, written = 0;
unsigned int chunk;
@@ -955,14 +955,20 @@ static inline ssize_t do_tty_write(
size_t size = count;
if (size > chunk)
size = chunk;
+
ret = -EFAULT;
- if (copy_from_user(tty->write_buf, buf, size))
+ if (copy_from_iter(tty->write_buf, size, from) != size)
break;
+
ret = write(tty, file, tty->write_buf, size);
if (ret <= 0)
break;
+
+ /* FIXME! Have Al check this! */
+ if (ret != size)
+ iov_iter_revert(from, size-ret);
+
written += ret;
- buf += ret;
count -= ret;
if (!count)
break;
@@ -1022,9 +1028,9 @@ void tty_write_message(struct tty_struct *tty, char *msg)
* write method will not be invoked in parallel for each device.
*/
-static ssize_t tty_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
{
+ struct file *file = iocb->ki_filp;
struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
ssize_t ret;
@@ -1038,17 +1044,16 @@ static ssize_t tty_write(struct file *file, const char __user *buf,
tty_err(tty, "missing write_room method\n");
ld = tty_ldisc_ref_wait(tty);
if (!ld)
- return hung_up_tty_write(file, buf, count, ppos);
+ return hung_up_tty_write(iocb, from);
if (!ld->ops->write)
ret = -EIO;
else
- ret = do_tty_write(ld->ops->write, tty, file, buf, count);
+ ret = do_tty_write(ld->ops->write, tty, file, from);
tty_ldisc_deref(ld);
return ret;
}
-ssize_t redirected_tty_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *p = NULL;
@@ -1059,11 +1064,11 @@ ssize_t redirected_tty_write(struct file *file, const char __user *buf,
if (p) {
ssize_t res;
- res = vfs_write(p, buf, count, &p->f_pos);
+ res = vfs_iocb_iter_write(p, iocb, iter);
fput(p);
return res;
}
- return tty_write(file, buf, count, ppos);
+ return tty_write(iocb, iter);
}
/*
@@ -2295,7 +2300,7 @@ static int tioccons(struct file *file)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (file->f_op->write == redirected_tty_write) {
+ if (file->f_op->write_iter == redirected_tty_write) {
struct file *f;
spin_lock(&redirect_lock);
f = redirect;
diff --git a/drivers/tty/ttynull.c b/drivers/tty/ttynull.c
index eced70ec54e1..17f05b7eb6d3 100644
--- a/drivers/tty/ttynull.c
+++ b/drivers/tty/ttynull.c
@@ -2,13 +2,6 @@
/*
* Copyright (C) 2019 Axis Communications AB
*
- * The console is useful for userspace applications which expect a console
- * device to work without modifications even when no console is available
- * or desired.
- *
- * In order to use this driver, you should redirect the console to this
- * TTY, or boot the kernel with console=ttynull.
- *
* Based on ttyprintk.c:
* Copyright (C) 2010 Samo Pogacnik
*/
@@ -66,17 +59,6 @@ static struct console ttynull_console = {
.device = ttynull_device,
};
-void __init register_ttynull_console(void)
-{
- if (!ttynull_driver)
- return;
-
- if (add_preferred_console(ttynull_console.name, 0, NULL))
- return;
-
- register_console(&ttynull_console);
-}
-
static int __init ttynull_init(void)
{
struct tty_driver *driver;
diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c
index 22a56c4dce67..7990fee03fe4 100644
--- a/drivers/usb/cdns3/cdns3-imx.c
+++ b/drivers/usb/cdns3/cdns3-imx.c
@@ -185,7 +185,11 @@ static int cdns_imx_probe(struct platform_device *pdev)
}
data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
- data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
+ data->clks = devm_kmemdup(dev, imx_cdns3_core_clks,
+ sizeof(imx_cdns3_core_clks), GFP_KERNEL);
+ if (!data->clks)
+ return -ENOMEM;
+
ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
if (ret)
return ret;
@@ -214,20 +218,16 @@ err:
return ret;
}
-static int cdns_imx_remove_core(struct device *dev, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- platform_device_unregister(pdev);
-
- return 0;
-}
-
static int cdns_imx_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct cdns_imx *data = dev_get_drvdata(dev);
- device_for_each_child(dev, NULL, cdns_imx_remove_core);
+ pm_runtime_get_sync(dev);
+ of_platform_depopulate(dev);
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 9e12152ea46b..8b7bc10b6e8b 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -139,9 +139,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
misc_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
- if (!misc_pdev || !platform_get_drvdata(misc_pdev))
+ if (!misc_pdev)
return ERR_PTR(-EPROBE_DEFER);
+ if (!platform_get_drvdata(misc_pdev)) {
+ put_device(&misc_pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
data->dev = &misc_pdev->dev;
/*
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f52f1bc0559f..781905745812 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1895,6 +1895,10 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x04d8, 0xfd08),
.driver_info = IGNORE_DEVICE,
},
+
+ { USB_DEVICE(0x04d8, 0xf58b),
+ .driver_info = IGNORE_DEVICE,
+ },
#endif
/*Samsung phone in firmware update mode */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 02d0cfd23bb2..508b1c3f8b73 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -465,13 +465,23 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
if (!desc->resp_count || !--desc->resp_count)
goto out;
+ if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+ rv = -ENODEV;
+ goto out;
+ }
+ if (test_bit(WDM_RESETTING, &desc->flags)) {
+ rv = -EIO;
+ goto out;
+ }
+
set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
if (rv) {
- dev_err(&desc->intf->dev,
- "usb_submit_urb failed with result %d\n", rv);
+ if (!test_bit(WDM_DISCONNECTING, &desc->flags))
+ dev_err(&desc->intf->dev,
+ "usb_submit_urb failed with result %d\n", rv);
/* make sure the next notification trigger a submit */
clear_bit(WDM_RESPONDING, &desc->flags);
@@ -1027,9 +1037,9 @@ static void wdm_disconnect(struct usb_interface *intf)
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
mutex_lock(&desc->wlock);
- kill_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
+ kill_urbs(desc);
mutex_unlock(&desc->wlock);
mutex_unlock(&desc->rlock);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 67cbd42421be..134dc2005ce9 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -274,8 +274,25 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i
#define usblp_reset(usblp)\
usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0)
-#define usblp_hp_channel_change_request(usblp, channel, buffer) \
- usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1)
+static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kzalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST,
+ USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE,
+ channel, buf, 1);
+ if (ret == 0)
+ *new_channel = buf[0];
+
+ kfree(buf);
+
+ return ret;
+}
/*
* See the description for usblp_select_alts() below for the usage
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 60886a7464c3..ad5a0f405a75 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1649,14 +1649,12 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
urb->status = status;
/*
* This function can be called in task context inside another remote
- * coverage collection section, but KCOV doesn't support that kind of
+ * coverage collection section, but kcov doesn't support that kind of
* recursion yet. Only collect coverage in softirq context for now.
*/
- if (in_serving_softirq())
- kcov_remote_start_usb((u64)urb->dev->bus->busnum);
+ kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
urb->complete(urb);
- if (in_serving_softirq())
- kcov_remote_stop();
+ kcov_remote_stop_softirq();
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 2f95f08ca511..1b241f937d8f 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -285,6 +285,7 @@
/* Global USB2 PHY Vendor Control Register */
#define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25)
+#define DWC3_GUSB2PHYACC_DONE BIT(24)
#define DWC3_GUSB2PHYACC_BUSY BIT(23)
#define DWC3_GUSB2PHYACC_WRITE BIT(22)
#define DWC3_GUSB2PHYACC_ADDR(n) (n << 16)
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index 417e05381b5d..bdf1f98dfad8 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -754,7 +754,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = priv->drvdata->setup_regmaps(priv, base);
if (ret)
- return ret;
+ goto err_disable_clks;
if (priv->vbus) {
ret = regulator_enable(priv->vbus);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 78cb4db8a6e4..ee44321fee38 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1763,6 +1763,8 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
list_for_each_entry_safe(r, t, &dep->started_list, list)
dwc3_gadget_move_cancelled_request(r);
+ dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
+
goto out;
}
}
@@ -2083,6 +2085,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
static void __dwc3_gadget_stop(struct dwc3 *dwc);
+static int __dwc3_gadget_start(struct dwc3 *dwc);
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
@@ -2145,6 +2148,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
dwc->ev_buf->length;
}
+ } else {
+ __dwc3_gadget_start(dwc);
}
ret = dwc3_gadget_run_stop(dwc, is_on, false);
@@ -2319,10 +2324,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
}
dwc->gadget_driver = driver;
-
- if (pm_runtime_active(dwc->dev))
- __dwc3_gadget_start(dwc);
-
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
@@ -2348,13 +2349,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
-
- if (pm_runtime_suspended(dwc->dev))
- goto out;
-
- __dwc3_gadget_stop(dwc);
-
-out:
dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);
diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
index aa213c9815f6..f23f4c9a557e 100644
--- a/drivers/usb/dwc3/ulpi.c
+++ b/drivers/usb/dwc3/ulpi.c
@@ -7,6 +7,8 @@
* Author: Heikki Krogerus <[email protected]>
*/
+#include <linux/delay.h>
+#include <linux/time64.h>
#include <linux/ulpi/regs.h>
#include "core.h"
@@ -17,14 +19,28 @@
DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
-static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
+#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
+
+static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
{
- unsigned int count = 1000;
+ unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
+ unsigned int count = 10000;
u32 reg;
+ if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ if (read)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (reg & DWC3_GUSB2PHYCFG_SUSPHY)
+ usleep_range(1000, 1200);
+
while (count--) {
+ ndelay(ns);
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
- if (!(reg & DWC3_GUSB2PHYACC_BUSY))
+ if (reg & DWC3_GUSB2PHYACC_DONE)
return 0;
cpu_relax();
}
@@ -38,16 +54,10 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr)
u32 reg;
int ret;
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
- }
-
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- ret = dwc3_ulpi_busyloop(dwc);
+ ret = dwc3_ulpi_busyloop(dwc, addr, true);
if (ret)
return ret;
@@ -61,17 +71,11 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
struct dwc3 *dwc = dev_get_drvdata(dev);
u32 reg;
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
- }
-
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
reg |= DWC3_GUSB2PHYACC_WRITE | val;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- return dwc3_ulpi_busyloop(dwc);
+ return dwc3_ulpi_busyloop(dwc, addr, false);
}
static const struct ulpi_ops dwc3_ulpi_ops = {
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 7e47e6223089..2d152571a7de 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -265,6 +265,7 @@ config USB_CONFIGFS_NCM
depends on NET
select USB_U_ETHER
select USB_F_NCM
+ select CRC32
help
NCM is an advanced protocol for Ethernet encapsulation, allows
grouping of several ethernet frames into one USB transfer and
@@ -314,6 +315,7 @@ config USB_CONFIGFS_EEM
depends on NET
select USB_U_ETHER
select USB_F_EEM
+ select CRC32
help
CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
and therefore can be supported by more hardware. Technically ECM and
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index c6d455f2bb92..1a556a628971 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -392,8 +392,11 @@ int usb_function_deactivate(struct usb_function *function)
spin_lock_irqsave(&cdev->lock, flags);
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_deactivate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
if (status == 0)
cdev->deactivations++;
@@ -424,8 +427,11 @@ int usb_function_activate(struct usb_function *function)
status = -EINVAL;
else {
cdev->deactivations--;
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_activate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
}
spin_unlock_irqrestore(&cdev->lock, flags);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 56051bb97349..36ffb43f9c1a 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -221,9 +221,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item,
static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page)
{
- char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name;
+ struct gadget_info *gi = to_gadget_info(item);
+ char *udc_name;
+ int ret;
+
+ mutex_lock(&gi->lock);
+ udc_name = gi->composite.gadget_driver.udc_name;
+ ret = sprintf(page, "%s\n", udc_name ?: "");
+ mutex_unlock(&gi->lock);
- return sprintf(page, "%s\n", udc_name ?: "");
+ return ret;
}
static int unregister_gadget(struct gadget_info *gi)
@@ -1248,9 +1255,9 @@ static void purge_configs_funcs(struct gadget_info *gi)
cfg = container_of(c, struct config_usb_cfg, c);
- list_for_each_entry_safe(f, tmp, &c->functions, list) {
+ list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) {
- list_move_tail(&f->list, &cfg->func_list);
+ list_move(&f->list, &cfg->func_list);
if (f->unbind) {
dev_dbg(&gi->cdev.gadget->dev,
"unbind function '%s'/%p\n",
@@ -1536,7 +1543,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
.suspend = configfs_composite_suspend,
.resume = configfs_composite_resume,
- .max_speed = USB_SPEED_SUPER,
+ .max_speed = USB_SPEED_SUPER_PLUS,
.driver = {
.owner = THIS_MODULE,
.name = "configfs-gadget",
@@ -1576,7 +1583,7 @@ static struct config_group *gadgets_make(
gi->composite.unbind = configfs_do_nothing;
gi->composite.suspend = NULL;
gi->composite.resume = NULL;
- gi->composite.max_speed = USB_SPEED_SUPER;
+ gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 64a4112068fc..2f1eb2e81d30 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1162,6 +1162,7 @@ fail_tx_reqs:
printer_req_free(dev->in_ep, req);
}
+ usb_free_all_descriptors(f);
return ret;
}
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 3633df6d7610..5d960b6603b6 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -271,7 +271,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = {
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -280,7 +280,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -348,7 +348,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = {
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -357,7 +357,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -444,12 +444,28 @@ struct cntrl_range_lay3 {
__le32 dRES;
} __packed;
-static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
+static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
struct usb_endpoint_descriptor *ep_desc,
- unsigned int factor, bool is_playback)
+ enum usb_device_speed speed, bool is_playback)
{
int chmask, srate, ssize;
- u16 max_packet_size;
+ u16 max_size_bw, max_size_ep;
+ unsigned int factor;
+
+ switch (speed) {
+ case USB_SPEED_FULL:
+ max_size_ep = 1023;
+ factor = 1000;
+ break;
+
+ case USB_SPEED_HIGH:
+ max_size_ep = 1024;
+ factor = 8000;
+ break;
+
+ default:
+ return -EINVAL;
+ }
if (is_playback) {
chmask = uac2_opts->p_chmask;
@@ -461,10 +477,12 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
ssize = uac2_opts->c_ssize;
}
- max_packet_size = num_channels(chmask) * ssize *
+ max_size_bw = num_channels(chmask) * ssize *
DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
- ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size,
- le16_to_cpu(ep_desc->wMaxPacketSize)));
+ ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
+ max_size_ep));
+
+ return 0;
}
/* Use macro to overcome line length limitation */
@@ -670,10 +688,33 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
}
/* Calculate wMaxPacketSize according to audio bandwidth */
- set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
- set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
- set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
- set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epout_desc, USB_SPEED_FULL,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epin_desc, USB_SPEED_HIGH,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epout_desc, USB_SPEED_HIGH,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
if (EPOUT_EN(uac2_opts)) {
agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 31ea76adcc0d..c019f2b0c0af 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -45,9 +45,10 @@
#define UETH__VERSION "29-May-2008"
/* Experiments show that both Linux and Windows hosts allow up to 16k
- * frame sizes. Set the max size to 15k+52 to prevent allocating 32k
+ * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
* blocks and still have efficient handling. */
-#define GETHER_MAX_ETH_FRAME_LEN 15412
+#define GETHER_MAX_MTU_SIZE 15412
+#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
struct eth_dev {
/* lock is held while accessing port_usb
@@ -786,7 +787,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
dev->gadget = g;
SET_NETDEV_DEV(net, &g->dev);
@@ -848,7 +849,7 @@ struct net_device *gether_setup_name_default(const char *netname)
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
return net;
}
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index 59be2d8417c9..e8033e5f0c18 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -200,8 +200,10 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail_string_ids;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 1a12aab208b4..8c614bb86c66 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -90,7 +90,7 @@ config USB_BCM63XX_UDC
config USB_FSL_USB2
tristate "Freescale Highspeed USB DR Peripheral Controller"
- depends on FSL_SOC || ARCH_MXC
+ depends on FSL_SOC
help
Some of Freescale PowerPC and i.MX processors have a High Speed
Dual-Role(DR) USB controller, which supports device mode.
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index f5a7ce28aecd..a21f2224e7eb 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
obj-$(CONFIG_USB_BCM63XX_UDC) += bcm63xx_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
fsl_usb2_udc-y := fsl_udc_core.o
-fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o
obj-$(CONFIG_USB_TEGRA_XUDC) += tegra-xudc.o
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 0bd6b20435b8..02d8bfae58fb 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -420,7 +420,10 @@ static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
u32 state, reg, loops;
/* Stop DMA activity */
- writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ if (ep->epn.desc_mode)
+ writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ else
+ writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
/* Wait for it to complete */
for (loops = 0; loops < 1000; loops++) {
diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig
index 3e88c7670b2e..fb01ff47b64c 100644
--- a/drivers/usb/gadget/udc/bdc/Kconfig
+++ b/drivers/usb/gadget/udc/bdc/Kconfig
@@ -17,7 +17,7 @@ if USB_BDC_UDC
comment "Platform Support"
config USB_BDC_PCI
tristate "BDC support for PCIe based platforms"
- depends on USB_PCI
+ depends on USB_PCI && BROKEN
default USB_BDC_UDC
help
Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform.
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 5b5cfeb6c14a..ea114f922ccf 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -659,8 +659,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
*
* Enables the D+ (or potentially D-) pullup. The host will start
* enumerating this gadget when the pullup is active and a VBUS session
- * is active (the link is powered). This pullup is always enabled unless
- * usb_gadget_disconnect() has been used to disable it.
+ * is active (the link is powered).
*
* Returns zero on success, else negative errno.
*/
@@ -1530,10 +1529,13 @@ static ssize_t soft_connect_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ ssize_t ret;
+ mutex_lock(&udc_lock);
if (!udc->driver) {
dev_err(dev, "soft-connect without a gadget driver\n");
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto out;
}
if (sysfs_streq(buf, "connect")) {
@@ -1544,10 +1546,14 @@ static ssize_t soft_connect_store(struct device *dev,
usb_gadget_udc_stop(udc);
} else {
dev_err(dev, "unsupported command '%s'\n", buf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- return n;
+ ret = n;
+out:
+ mutex_unlock(&udc_lock);
+ return ret;
}
static DEVICE_ATTR_WO(soft_connect);
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index ab5e978b5052..57067763b100 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -2118,9 +2118,21 @@ static int dummy_hub_control(
dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
set_link_state(dum_hcd);
break;
- default:
+ case USB_PORT_FEAT_ENABLE:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ fallthrough;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
dum_hcd->port_status &= ~(1 << wValue);
set_link_state(dum_hcd);
+ break;
+ default:
+ /* Disallow INDICATOR and C_OVER_CURRENT */
+ goto error;
}
break;
case GetHubDescriptor:
@@ -2258,17 +2270,20 @@ static int dummy_hub_control(
}
fallthrough;
case USB_PORT_FEAT_RESET:
+ if (!(dum_hcd->port_status & USB_PORT_STAT_CONNECTION))
+ break;
/* if it's already enabled, disable */
if (hcd->speed == HCD_USB3) {
- dum_hcd->port_status = 0;
dum_hcd->port_status =
(USB_SS_PORT_STAT_POWER |
USB_PORT_STAT_CONNECTION |
USB_PORT_STAT_RESET);
- } else
+ } else {
dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
| USB_PORT_STAT_LOW_SPEED
| USB_PORT_STAT_HIGH_SPEED);
+ dum_hcd->port_status |= USB_PORT_STAT_RESET;
+ }
/*
* We want to reset device status. All but the
* Self powered feature
@@ -2280,19 +2295,19 @@ static int dummy_hub_control(
* interval? Is it still 50msec as for HS?
*/
dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
- fallthrough;
- default:
- if (hcd->speed == HCD_USB3) {
- if ((dum_hcd->port_status &
- USB_SS_PORT_STAT_POWER) != 0) {
- dum_hcd->port_status |= (1 << wValue);
- }
- } else
- if ((dum_hcd->port_status &
- USB_PORT_STAT_POWER) != 0) {
- dum_hcd->port_status |= (1 << wValue);
- }
set_link_state(dum_hcd);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3, and ignored for USB-2 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ break;
+ default:
+ /* Disallow TEST, INDICATOR, and C_OVER_CURRENT */
+ goto error;
}
break;
case GetPortErrorCount:
diff --git a/drivers/usb/gadget/udc/fsl_mxc_udc.c b/drivers/usb/gadget/udc/fsl_mxc_udc.c
deleted file mode 100644
index 5a321992decc..000000000000
--- a/drivers/usb/gadget/udc/fsl_mxc_udc.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2009
- * Guennadi Liakhovetski, DENX Software Engineering, <[email protected]>
- *
- * Description:
- * Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
- * driver to function correctly on these systems.
- */
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/fsl_devices.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include "fsl_usb2_udc.h"
-
-static struct clk *mxc_ahb_clk;
-static struct clk *mxc_per_clk;
-static struct clk *mxc_ipg_clk;
-
-/* workaround ENGcm09152 for i.MX35 */
-#define MX35_USBPHYCTRL_OFFSET 0x600
-#define USBPHYCTRL_OTGBASE_OFFSET 0x8
-#define USBPHYCTRL_EVDO (1 << 23)
-
-int fsl_udc_clk_init(struct platform_device *pdev)
-{
- struct fsl_usb2_platform_data *pdata;
- unsigned long freq;
- int ret;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- mxc_ipg_clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(mxc_ipg_clk)) {
- dev_err(&pdev->dev, "clk_get(\"ipg\") failed\n");
- return PTR_ERR(mxc_ipg_clk);
- }
-
- mxc_ahb_clk = devm_clk_get(&pdev->dev, "ahb");
- if (IS_ERR(mxc_ahb_clk)) {
- dev_err(&pdev->dev, "clk_get(\"ahb\") failed\n");
- return PTR_ERR(mxc_ahb_clk);
- }
-
- mxc_per_clk = devm_clk_get(&pdev->dev, "per");
- if (IS_ERR(mxc_per_clk)) {
- dev_err(&pdev->dev, "clk_get(\"per\") failed\n");
- return PTR_ERR(mxc_per_clk);
- }
-
- clk_prepare_enable(mxc_ipg_clk);
- clk_prepare_enable(mxc_ahb_clk);
- clk_prepare_enable(mxc_per_clk);
-
- /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
- if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) {
- freq = clk_get_rate(mxc_per_clk);
- if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
- (freq < 59999000 || freq > 60001000)) {
- dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
- ret = -EINVAL;
- goto eclkrate;
- }
- }
-
- return 0;
-
-eclkrate:
- clk_disable_unprepare(mxc_ipg_clk);
- clk_disable_unprepare(mxc_ahb_clk);
- clk_disable_unprepare(mxc_per_clk);
- mxc_per_clk = NULL;
- return ret;
-}
-
-int fsl_udc_clk_finalize(struct platform_device *pdev)
-{
- struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
- int ret = 0;
-
- /* workaround ENGcm09152 for i.MX35 */
- if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
- unsigned int v;
- struct resource *res = platform_get_resource
- (pdev, IORESOURCE_MEM, 0);
- void __iomem *phy_regs = ioremap(res->start +
- MX35_USBPHYCTRL_OFFSET, 512);
- if (!phy_regs) {
- dev_err(&pdev->dev, "ioremap for phy address fails\n");
- ret = -EINVAL;
- goto ioremap_err;
- }
-
- v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
- writel(v | USBPHYCTRL_EVDO,
- phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
-
- iounmap(phy_regs);
- }
-
-
-ioremap_err:
- /* ULPI transceivers don't need usbpll */
- if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
- clk_disable_unprepare(mxc_per_clk);
- mxc_per_clk = NULL;
- }
-
- return ret;
-}
-
-void fsl_udc_clk_release(void)
-{
- if (mxc_per_clk)
- clk_disable_unprepare(mxc_per_clk);
- clk_disable_unprepare(mxc_ahb_clk);
- clk_disable_unprepare(mxc_ipg_clk);
-}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index e358ae17d51e..1926b328b6aa 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -574,6 +574,7 @@ static int ehci_run (struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp;
u32 hcc_params;
+ int rc;
hcd->uses_new_polling = 1;
@@ -629,9 +630,20 @@ static int ehci_run (struct usb_hcd *hcd)
down_write(&ehci_cf_port_reset_rwsem);
ehci->rh_state = EHCI_RH_RUNNING;
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+
+ /* Wait until HC become operational */
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
msleep(5);
+ rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000);
+
up_write(&ehci_cf_port_reset_rwsem);
+
+ if (rc) {
+ ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n",
+ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc);
+ return rc;
+ }
+
ehci->last_periodic_enable = ktime_get_real();
temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 087402aec5cb..9f9ab5ccea88 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -345,6 +345,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
unlink_empty_async_suspended(ehci);
+ /* Some Synopsys controllers mistakenly leave IAA turned on */
+ ehci_writel(ehci, STS_IAA, &ehci->regs->status);
+
/* Any IAA cycle that started before the suspend is now invalid */
end_iaa_cycle(ehci);
ehci_handle_start_intr_unlinks(ehci);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5677b81c0915..cf0c93a90200 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2931,6 +2931,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
trb->field[0] = cpu_to_le32(field1);
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
+ /* make sure TRB is fully written before giving it to the controller */
+ wmb();
trb->field[3] = cpu_to_le32(field4);
trace_xhci_queue_trb(ring, trb);
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 934be1686352..50bb91b6a4b8 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -623,6 +623,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
enable);
if (err < 0)
break;
+
+ /*
+ * wait 500us for LFPS detector to be disabled before
+ * sending ACK
+ */
+ if (!enable)
+ usleep_range(500, 1000);
}
if (err < 0) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 91ab81c3fc79..e86940571b4c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4770,19 +4770,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+ else
+ timeout_ns = udev->u1_params.sel;
+
/* Prevent U1 if service interval is shorter than U1 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
- else
- timeout_ns = udev->u1_params.sel;
-
/* The U1 timeout is encoded in 1us intervals.
* Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
*/
@@ -4834,19 +4834,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+ else
+ timeout_ns = udev->u2_params.sel;
+
/* Prevent U2 if service interval is shorter than U2 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
- else
- timeout_ns = udev->u2_params.sel;
-
/* The U2 timeout is encoded in 256us intervals */
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
/* If the necessary timeout value is bigger than what we can set in the
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 73ebfa6e9715..c640f98d20c5 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -496,6 +496,9 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
finish_wait(&dev->waitq, &wait);
+ /* make sure URB is idle after timeout or (spurious) CMD_ACK */
+ usb_kill_urb(dev->cntl_urb);
+
mutex_unlock(&dev->io_mutex);
if (retval < 0) {
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index f1201d4de297..e8f06b41a503 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -532,23 +532,29 @@ static int iuu_uart_flush(struct usb_serial_port *port)
struct device *dev = &port->dev;
int i;
int status;
- u8 rxcmd = IUU_UART_RX;
+ u8 *rxcmd;
struct iuu_private *priv = usb_get_serial_port_data(port);
if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0)
return -EIO;
+ rxcmd = kmalloc(1, GFP_KERNEL);
+ if (!rxcmd)
+ return -ENOMEM;
+
+ rxcmd[0] = IUU_UART_RX;
+
for (i = 0; i < 2; i++) {
- status = bulk_immediate(port, &rxcmd, 1);
+ status = bulk_immediate(port, rxcmd, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_write error\n", __func__);
- return status;
+ goto out_free;
}
status = read_immediate(port, &priv->len, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
if (priv->len > 0) {
@@ -556,12 +562,16 @@ static int iuu_uart_flush(struct usb_serial_port *port)
status = read_immediate(port, priv->buf, priv->len);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
}
}
dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__);
iuu_led(port, 0, 0xF000, 0, 0xFF);
+
+out_free:
+ kfree(rxcmd);
+
return status;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2c21e34235bb..3fe959104311 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1117,6 +1117,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
@@ -2057,6 +2059,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 870e9cf3d5dc..f9677a5ec31b 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -91,6 +91,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
US_FL_BROKEN_FUA),
/* Reported-by: Thinh Nguyen <[email protected]> */
+UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
+ "PNY",
+ "Pro Elite SSD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
+/* Reported-by: Thinh Nguyen <[email protected]> */
UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
"PNY",
"Pro Elite SSD",
diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig
index 187690fd1a5b..60d375e9c3c7 100644
--- a/drivers/usb/typec/altmodes/Kconfig
+++ b/drivers/usb/typec/altmodes/Kconfig
@@ -20,6 +20,6 @@ config TYPEC_NVIDIA_ALTMODE
to enable support for VirtualLink devices with NVIDIA GPUs.
To compile this driver as a module, choose M here: the
- module will be called typec_displayport.
+ module will be called typec_nvidia.
endmenu
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index ebfd3113a9a8..8f77669f9cf4 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -766,6 +766,7 @@ int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmod
return ret;
sysfs_notify(&partner->dev.kobj, NULL, "number_of_alternate_modes");
+ kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE);
return 0;
}
@@ -923,6 +924,7 @@ int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes)
return ret;
sysfs_notify(&plug->dev.kobj, NULL, "number_of_alternate_modes");
+ kobject_uevent(&plug->dev.kobj, KOBJ_CHANGE);
return 0;
}
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index cf37a59ce130..46a25b8db72e 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -207,10 +207,21 @@ static int
pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp)
{
u8 msg[2] = { };
+ int ret;
msg[0] = PMC_USB_DP_HPD;
msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
+ /* Configure HPD first if HPD,IRQ comes together */
+ if (!IOM_PORT_HPD_ASSERTED(port->iom_status) &&
+ dp->status & DP_STATUS_IRQ_HPD &&
+ dp->status & DP_STATUS_HPD_STATE) {
+ msg[1] = PMC_USB_DP_HPD_LVL;
+ ret = pmc_usb_command(port, msg, sizeof(msg));
+ if (ret)
+ return ret;
+ }
+
if (dp->status & DP_STATUS_IRQ_HPD)
msg[1] = PMC_USB_DP_HPD_IRQ;
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 66cde5e5f796..3209b5ddd30c 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -396,6 +396,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
default:
usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
wValue);
+ if (wValue >= 32)
+ goto error;
vhci_hcd->port_status[rhport] &= ~(1 << wValue);
break;
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 531a00d703cd..c8784dfafdd7 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -863,6 +863,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
size_t len, total_len = 0;
int err;
struct vhost_net_ubuf_ref *ubufs;
+ struct ubuf_info *ubuf;
bool zcopy_used;
int sent_pkts = 0;
@@ -895,9 +896,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy_used) {
- struct ubuf_info *ubuf;
ubuf = nvq->ubuf_info + nvq->upend_idx;
-
vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
ubuf->callback = vhost_zerocopy_callback;
@@ -927,7 +926,8 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
if (zcopy_used) {
- vhost_net_ubuf_put(ubufs);
+ if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
+ vhost_net_ubuf_put(ubufs);
nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
% UIO_MAXIOV;
}
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index a483cec31d5c..5e78fb719602 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -30,7 +30,12 @@
#define VHOST_VSOCK_PKT_WEIGHT 256
enum {
- VHOST_VSOCK_FEATURES = VHOST_FEATURES,
+ VHOST_VSOCK_FEATURES = VHOST_FEATURES |
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM)
+};
+
+enum {
+ VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};
/* Used to track all the vhost_vsock instances on the system. */
@@ -94,6 +99,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
if (!vhost_vq_get_backend(vq))
goto out;
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
/* Avoid further vmexits, we're already processing the virtqueue */
vhost_disable_notify(&vsock->dev, vq);
@@ -449,6 +457,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
if (!vhost_vq_get_backend(vq))
goto out;
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
vhost_disable_notify(&vsock->dev, vq);
do {
u32 len;
@@ -766,8 +777,12 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
mutex_lock(&vsock->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
!vhost_log_access_ok(&vsock->dev)) {
- mutex_unlock(&vsock->dev.mutex);
- return -EFAULT;
+ goto err;
+ }
+
+ if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
+ if (vhost_init_device_iotlb(&vsock->dev, true))
+ goto err;
}
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
@@ -778,6 +793,10 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
}
mutex_unlock(&vsock->dev.mutex);
return 0;
+
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return -EFAULT;
}
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
@@ -811,6 +830,18 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
if (copy_from_user(&features, argp, sizeof(features)))
return -EFAULT;
return vhost_vsock_set_features(vsock, features);
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_VSOCK_BACKEND_FEATURES;
+ if (copy_to_user(argp, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_BACKEND_FEATURES:
+ if (copy_from_user(&features, argp, sizeof(features)))
+ return -EFAULT;
+ if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+ vhost_set_backend_features(&vsock->dev, features);
+ return 0;
default:
mutex_lock(&vsock->dev.mutex);
r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
@@ -823,6 +854,34 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
}
}
+static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+ int noblock = file->f_flags & O_NONBLOCK;
+
+ return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
+{
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_poll(file, dev, wait);
+}
+
static const struct file_operations vhost_vsock_fops = {
.owner = THIS_MODULE,
.open = vhost_vsock_dev_open,
@@ -830,6 +889,9 @@ static const struct file_operations vhost_vsock_fops = {
.llseek = noop_llseek,
.unlocked_ioctl = vhost_vsock_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
+ .read_iter = vhost_vsock_chr_read_iter,
+ .write_iter = vhost_vsock_chr_write_iter,
+ .poll = vhost_vsock_chr_poll,
};
static struct miscdevice vhost_vsock_misc = {
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index cfb7f5612ef0..4f02db65dede 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1269,6 +1269,7 @@ config FB_ATY
select FB_CFB_IMAGEBLIT
select FB_BACKLIGHT if FB_ATY_BACKLIGHT
select FB_MACMODES if PPC
+ select FB_ATY_CT if SPARC64 && PCI
help
This driver supports graphics boards with the ATI Mach64 chips.
Say Y if you have such a graphics board.
@@ -1279,7 +1280,6 @@ config FB_ATY
config FB_ATY_CT
bool "Mach64 CT/VT/GT/LT (incl. 3D RAGE) support"
depends on PCI && FB_ATY
- default y if SPARC64 && PCI
help
Say Y here to support use of ATI's 64-bit Rage boards (or other
boards based on the Mach64 CT, VT, GT, and LT chipsets) as a
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index c8feff0ee8da..83c8e809955a 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -2353,6 +2353,9 @@ static int aty_init(struct fb_info *info)
int gtb_memsize, has_var = 0;
struct fb_var_screeninfo var;
int ret;
+#ifdef CONFIG_ATARI
+ u8 dac_type;
+#endif
init_waitqueue_head(&par->vblank.wait);
spin_lock_init(&par->int_lock);
@@ -2360,13 +2363,12 @@ static int aty_init(struct fb_info *info)
#ifdef CONFIG_FB_ATY_GX
if (!M64_HAS(INTEGRATED)) {
u32 stat0;
- u8 dac_type, dac_subtype, clk_type;
+ u8 dac_subtype, clk_type;
stat0 = aty_ld_le32(CNFG_STAT0, par);
par->bus_type = (stat0 >> 0) & 0x07;
par->ram_type = (stat0 >> 3) & 0x07;
ramname = aty_gx_ram[par->ram_type];
/* FIXME: clockchip/RAMDAC probing? */
- dac_type = (aty_ld_le32(DAC_CNTL, par) >> 16) & 0x07;
#ifdef CONFIG_ATARI
clk_type = CLK_ATI18818_1;
dac_type = (stat0 >> 9) & 0x07;
@@ -2375,7 +2377,6 @@ static int aty_init(struct fb_info *info)
else
dac_subtype = (aty_ld_8(SCRATCH_REG1 + 1, par) & 0xF0) | dac_type;
#else
- dac_type = DAC_IBMRGB514;
dac_subtype = DAC_IBMRGB514;
clk_type = CLK_IBMRGB514;
#endif
@@ -3062,7 +3063,6 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
if (dp == of_console_device) {
struct fb_var_screeninfo *var = &default_var;
unsigned int N, P, Q, M, T, R;
- u32 v_total, h_total;
struct crtc crtc;
u8 pll_regs[16];
u8 clock_cntl;
@@ -3078,9 +3078,6 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
crtc.gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par);
aty_crtc_to_var(&crtc, var);
- h_total = var->xres + var->right_margin + var->hsync_len + var->left_margin;
- v_total = var->yres + var->lower_margin + var->vsync_len + var->upper_margin;
-
/*
* Read the PLL to figure actual Refresh Rate.
*/
diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
index f87cc81f4fa2..011b07e44e0d 100644
--- a/drivers/video/fbdev/aty/mach64_ct.c
+++ b/drivers/video/fbdev/aty/mach64_ct.c
@@ -281,10 +281,13 @@ static u32 aty_pll_to_var_ct(const struct fb_info *info, const union aty_pll *pl
void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
- u32 crtc_gen_cntl, lcd_gen_cntrl;
+ u32 crtc_gen_cntl;
u8 tmp, tmp2;
- lcd_gen_cntrl = 0;
+#ifdef CONFIG_FB_ATY_GENERIC_LCD
+ u32 lcd_gen_cntrl = 0;
+#endif
+
#ifdef DEBUG
printk("atyfb(%s): about to program:\n"
"pll_ext_cntl=0x%02x pll_gen_cntl=0x%02x pll_vclk_cntl=0x%02x\n",
@@ -402,7 +405,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
struct atyfb_par *par = (struct atyfb_par *) info->par;
u8 mpost_div, xpost_div, sclk_post_div_real;
u32 q, memcntl, trp;
- u32 dsp_config, dsp_on_off, vga_dsp_config, vga_dsp_on_off;
+ u32 dsp_config;
#ifdef DEBUG
int pllmclk, pllsclk;
#endif
@@ -488,9 +491,9 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
/* Allow BIOS to override */
dsp_config = aty_ld_le32(DSP_CONFIG, par);
- dsp_on_off = aty_ld_le32(DSP_ON_OFF, par);
- vga_dsp_config = aty_ld_le32(VGA_DSP_CONFIG, par);
- vga_dsp_on_off = aty_ld_le32(VGA_DSP_ON_OFF, par);
+ aty_ld_le32(DSP_ON_OFF, par);
+ aty_ld_le32(VGA_DSP_CONFIG, par);
+ aty_ld_le32(VGA_DSP_ON_OFF, par);
if (dsp_config)
pll->ct.dsp_loop_latency = (dsp_config & DSP_LOOP_LATENCY) >> 16;
diff --git a/drivers/video/fbdev/aty/radeon_monitor.c b/drivers/video/fbdev/aty/radeon_monitor.c
index 9966c58aa26c..df55e23b7a5a 100644
--- a/drivers/video/fbdev/aty/radeon_monitor.c
+++ b/drivers/video/fbdev/aty/radeon_monitor.c
@@ -488,12 +488,10 @@ void radeon_probe_screens(struct radeonfb_info *rinfo,
#if defined(DEBUG) && defined(CONFIG_FB_RADEON_I2C)
{
u8 *EDIDs[4] = { NULL, NULL, NULL, NULL };
- int mon_types[4] = {MT_NONE, MT_NONE, MT_NONE, MT_NONE};
int i;
for (i = 0; i < 4; i++)
- mon_types[i] = radeon_probe_i2c_connector(rinfo,
- i+1, &EDIDs[i]);
+ radeon_probe_i2c_connector(rinfo, i + 1, &EDIDs[i]);
}
#endif /* DEBUG */
/*
diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c
index 0d9a6bb57a09..e7702fe1fe7d 100644
--- a/drivers/video/fbdev/bw2.c
+++ b/drivers/video/fbdev/bw2.c
@@ -116,7 +116,7 @@ struct bw2_par {
/**
* bw2_blank - Optional function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int
diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c
index 77f6470ce665..bdcc3f6ab666 100644
--- a/drivers/video/fbdev/cg3.c
+++ b/drivers/video/fbdev/cg3.c
@@ -179,7 +179,7 @@ static int cg3_setcolreg(unsigned regno,
/**
* cg3_blank - Optional function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int cg3_blank(int blank, struct fb_info *info)
diff --git a/drivers/video/fbdev/cg6.c b/drivers/video/fbdev/cg6.c
index a1c68cd48d7e..97ef43c25974 100644
--- a/drivers/video/fbdev/cg6.c
+++ b/drivers/video/fbdev/cg6.c
@@ -511,7 +511,7 @@ static int cg6_setcolreg(unsigned regno,
/**
* cg6_blank - Blanks the display.
*
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int cg6_blank(int blank, struct fb_info *info)
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index e9027172c0f5..93802abbbc72 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -2463,8 +2463,6 @@ static void AttrOn(const struct cirrusfb_info *cinfo)
*/
static void WHDR(const struct cirrusfb_info *cinfo, unsigned char val)
{
- unsigned char dummy;
-
if (is_laguna(cinfo))
return;
if (cinfo->btype == BT_PICASSO) {
@@ -2473,18 +2471,18 @@ static void WHDR(const struct cirrusfb_info *cinfo, unsigned char val)
WGen(cinfo, VGA_PEL_MSK, 0x00);
udelay(200);
/* next read dummy from pixel address (3c8) */
- dummy = RGen(cinfo, VGA_PEL_IW);
+ RGen(cinfo, VGA_PEL_IW);
udelay(200);
}
/* now do the usual stuff to access the HDR */
- dummy = RGen(cinfo, VGA_PEL_MSK);
+ RGen(cinfo, VGA_PEL_MSK);
udelay(200);
- dummy = RGen(cinfo, VGA_PEL_MSK);
+ RGen(cinfo, VGA_PEL_MSK);
udelay(200);
- dummy = RGen(cinfo, VGA_PEL_MSK);
+ RGen(cinfo, VGA_PEL_MSK);
udelay(200);
- dummy = RGen(cinfo, VGA_PEL_MSK);
+ RGen(cinfo, VGA_PEL_MSK);
udelay(200);
WGen(cinfo, VGA_PEL_MSK, val);
@@ -2492,7 +2490,7 @@ static void WHDR(const struct cirrusfb_info *cinfo, unsigned char val)
if (cinfo->btype == BT_PICASSO) {
/* now first reset HDR access counter */
- dummy = RGen(cinfo, VGA_PEL_IW);
+ RGen(cinfo, VGA_PEL_IW);
udelay(200);
/* and at the end, restore the mask value */
@@ -2800,9 +2798,9 @@ static void bestclock(long freq, int *nom, int *den, int *div)
#ifdef CIRRUSFB_DEBUG
-/**
+/*
* cirrusfb_dbg_print_regs
- * @base: If using newmmio, the newmmio base address, otherwise %NULL
+ * @regbase: If using newmmio, the newmmio base address, otherwise %NULL
* @reg_class: type of registers to read: %CRT, or %SEQ
*
* DESCRIPTION:
@@ -2847,7 +2845,7 @@ static void cirrusfb_dbg_print_regs(struct fb_info *info,
va_end(list);
}
-/**
+/*
* cirrusfb_dbg_reg_dump
* @base: If using newmmio, the newmmio base address, otherwise %NULL
*
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 2df56bd303d2..509311471d51 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -64,9 +64,9 @@
#undef in_le32
#undef out_le32
#define in_8(addr) 0
-#define out_8(addr, val)
+#define out_8(addr, val) (void)(val)
#define in_le32(addr) 0
-#define out_le32(addr, val)
+#define out_le32(addr, val) (void)(val)
#define pgprot_cached_wthru(prot) (prot)
#else
static void invalid_vram_cache(void __force *addr)
diff --git a/drivers/video/fbdev/core/fb_notify.c b/drivers/video/fbdev/core/fb_notify.c
index 74c2da528884..10e3b9a74adc 100644
--- a/drivers/video/fbdev/core/fb_notify.c
+++ b/drivers/video/fbdev/core/fb_notify.c
@@ -19,6 +19,8 @@ static BLOCKING_NOTIFIER_HEAD(fb_notifier_list);
/**
* fb_register_client - register a client notifier
* @nb: notifier block to callback on events
+ *
+ * Return: 0 on success, negative error code on failure.
*/
int fb_register_client(struct notifier_block *nb)
{
@@ -29,6 +31,8 @@ EXPORT_SYMBOL(fb_register_client);
/**
* fb_unregister_client - unregister a client notifier
* @nb: notifier block to callback on events
+ *
+ * Return: 0 on success, negative error code on failure.
*/
int fb_unregister_client(struct notifier_block *nb)
{
@@ -38,7 +42,10 @@ EXPORT_SYMBOL(fb_unregister_client);
/**
* fb_notifier_call_chain - notify clients of fb_events
+ * @val: value passed to callback
+ * @v: pointer passed to callback
*
+ * Return: The return value of the last notifier function
*/
int fb_notifier_call_chain(unsigned long val, void *v)
{
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index bf61598bf1c3..44a5cd2f54cc 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -56,8 +56,6 @@
* more details.
*/
-#undef FBCONDEBUG
-
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
@@ -82,12 +80,6 @@
#include "fbcon.h"
-#ifdef FBCONDEBUG
-# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
-#else
-# define DPRINTK(fmt, args...)
-#endif
-
/*
* FIXME: Locking
*
@@ -1015,11 +1007,11 @@ static const char *fbcon_startup(void)
rows /= vc->vc_font.height;
vc_resize(vc, cols, rows);
- DPRINTK("mode: %s\n", info->fix.id);
- DPRINTK("visual: %d\n", info->fix.visual);
- DPRINTK("res: %dx%d-%d\n", info->var.xres,
- info->var.yres,
- info->var.bits_per_pixel);
+ pr_debug("mode: %s\n", info->fix.id);
+ pr_debug("visual: %d\n", info->fix.visual);
+ pr_debug("res: %dx%d-%d\n", info->var.xres,
+ info->var.yres,
+ info->var.bits_per_pixel);
fbcon_add_cursor_timer(info);
return display_desc;
@@ -2013,7 +2005,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
y_diff < 0 || y_diff > virt_fh) {
const struct fb_videomode *mode;
- DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
+ pr_debug("attempting resize %ix%i\n", var.xres, var.yres);
mode = fb_find_best_mode(&var, &info->modelist);
if (mode == NULL)
return -EINVAL;
@@ -2023,7 +2015,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
if (virt_w > var.xres/virt_fw || virt_h > var.yres/virt_fh)
return -EINVAL;
- DPRINTK("resize now %ix%i\n", var.xres, var.yres);
+ pr_debug("resize now %ix%i\n", var.xres, var.yres);
if (con_is_visible(vc)) {
var.activate = FB_ACTIVATE_NOW |
FB_ACTIVATE_FORCE;
@@ -3299,8 +3291,7 @@ static void fbcon_exit(void)
if (info->queue.func)
pending = cancel_work_sync(&info->queue);
- DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
- "no"));
+ pr_debug("fbcon: %s pending work\n", (pending ? "canceled" : "no"));
for (j = first_fb_vc; j <= last_fb_vc; j++) {
if (con2fb_map[j] == i) {
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 1bf82dbc9e3c..b0e690f41025 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -605,6 +605,7 @@ static void get_detailed_timing(unsigned char *block,
* fb_create_modedb - create video mode database
* @edid: EDID data
* @dbsize: database size
+ * @specs: monitor specifications, may be NULL
*
* RETURNS: struct fb_videomode, @dbsize contains length of database
*
@@ -1100,7 +1101,6 @@ static u32 fb_get_hblank_by_hfreq(u32 hfreq, u32 xres)
* 2 * M
* M = 300;
* C = 30;
-
*/
static u32 fb_get_hblank_by_dclk(u32 dclk, u32 xres)
{
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index e57c00824965..b80ba3d2a9b8 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -139,7 +139,7 @@ static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
static void efifb_show_boot_graphics(struct fb_info *info)
{
- u32 bmp_width, bmp_height, bmp_pitch, screen_pitch, dst_x, y, src_y;
+ u32 bmp_width, bmp_height, bmp_pitch, dst_x, y, src_y;
struct screen_info *si = &screen_info;
struct bmp_file_header *file_header;
struct bmp_dib_header *dib_header;
@@ -193,7 +193,6 @@ static void efifb_show_boot_graphics(struct fb_info *info)
bmp_width = dib_header->width;
bmp_height = abs(dib_header->height);
bmp_pitch = round_up(3 * bmp_width, 4);
- screen_pitch = si->lfb_linelength;
if ((file_header->bitmap_offset + bmp_pitch * bmp_height) >
bgrt_image_size)
diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c
index 948b73184433..b3d580e57221 100644
--- a/drivers/video/fbdev/ffb.c
+++ b/drivers/video/fbdev/ffb.c
@@ -667,7 +667,7 @@ static int ffb_setcolreg(unsigned regno,
/**
* ffb_blank - Optional function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int ffb_blank(int blank, struct fb_info *info)
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index 31270a8986e8..c5b99a4861e8 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -198,7 +198,7 @@ static void gbe_reset(void)
static void gbe_turn_off(void)
{
int i;
- unsigned int val, x, y, vpixen_off;
+ unsigned int val, y, vpixen_off;
gbe_turned_on = 0;
@@ -249,7 +249,6 @@ static void gbe_turn_off(void)
for (i = 0; i < 100000; i++) {
val = gbe->vt_xy;
- x = GET_GBE_FIELD(VT_XY, X, val);
y = GET_GBE_FIELD(VT_XY, Y, val);
if (y < vpixen_off)
break;
@@ -260,7 +259,6 @@ static void gbe_turn_off(void)
"gbefb: wait for vpixen_off timed out\n");
for (i = 0; i < 10000; i++) {
val = gbe->vt_xy;
- x = GET_GBE_FIELD(VT_XY, X, val);
y = GET_GBE_FIELD(VT_XY, Y, val);
if (y > vpixen_off)
break;
diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
index 9c83ec3f8e1f..2b885cd046fe 100644
--- a/drivers/video/fbdev/goldfishfb.c
+++ b/drivers/video/fbdev/goldfishfb.c
@@ -305,11 +305,13 @@ static const struct of_device_id goldfish_fb_of_match[] = {
};
MODULE_DEVICE_TABLE(of, goldfish_fb_of_match);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id goldfish_fb_acpi_match[] = {
{ "GFSH0004", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, goldfish_fb_acpi_match);
+#endif
static struct platform_driver goldfish_fb_driver = {
.probe = goldfish_fb_probe,
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index a45fcff1461f..8bbac7182ad3 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -357,8 +357,8 @@ error:
/**
* hgafb_open - open the framebuffer device
- * @info:pointer to fb_info object containing info for current hga board
- * @int:open by console system or userland.
+ * @info: pointer to fb_info object containing info for current hga board
+ * @init: open by console system or userland.
*/
static int hgafb_open(struct fb_info *info, int init)
@@ -370,9 +370,9 @@ static int hgafb_open(struct fb_info *info, int init)
}
/**
- * hgafb_open - open the framebuffer device
- * @info:pointer to fb_info object containing info for current hga board
- * @int:open by console system or userland.
+ * hgafb_release - open the framebuffer device
+ * @info: pointer to fb_info object containing info for current hga board
+ * @init: open by console system or userland.
*/
static int hgafb_release(struct fb_info *info, int init)
diff --git a/drivers/video/fbdev/leo.c b/drivers/video/fbdev/leo.c
index 40b11cce0ad6..3eb0f3583f4f 100644
--- a/drivers/video/fbdev/leo.c
+++ b/drivers/video/fbdev/leo.c
@@ -308,7 +308,7 @@ static int leo_setcolreg(unsigned regno,
/**
* leo_blank - Optional function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int leo_blank(int blank, struct fb_info *info)
diff --git a/drivers/video/fbdev/mmp/hw/mmp_spi.c b/drivers/video/fbdev/mmp/hw/mmp_spi.c
index 1911a47769b6..16401eb95c6c 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_spi.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_spi.c
@@ -17,8 +17,8 @@
/**
* spi_write - write command to the SPI port
+ * @spi: the SPI device.
* @data: can be 8/16/32-bit, MSB justified data to write.
- * @len: data length.
*
* Wait bus transfer complete IRQ.
* The caller is expected to perform the necessary locking.
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index 894617ddabcb..fabb271337ed 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -445,7 +445,6 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
{
struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
- uint32_t enabled;
unsigned long flags;
if (mx3_fbi->txd == NULL)
@@ -453,7 +452,7 @@ static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
spin_lock_irqsave(&mx3fb->lock, flags);
- enabled = sdc_fb_uninit(mx3_fbi);
+ sdc_fb_uninit(mx3_fbi);
spin_unlock_irqrestore(&mx3fb->lock, flags);
@@ -732,7 +731,7 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi);
/**
* mx3fb_set_fix() - set fixed framebuffer parameters from variable settings.
- * @info: framebuffer information pointer
+ * @fbi: framebuffer information pointer
* @return: 0 on success or negative error code on failure.
*/
static int mx3fb_set_fix(struct fb_info *fbi)
@@ -740,7 +739,7 @@ static int mx3fb_set_fix(struct fb_info *fbi)
struct fb_fix_screeninfo *fix = &fbi->fix;
struct fb_var_screeninfo *var = &fbi->var;
- strncpy(fix->id, "DISP3 BG", 8);
+ memcpy(fix->id, "DISP3 BG", 8);
fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
@@ -1105,6 +1104,8 @@ static void __blank(int blank, struct fb_info *fbi)
/**
* mx3fb_blank() - blank the display.
+ * @blank: blank value for the panel
+ * @fbi: framebuffer information pointer
*/
static int mx3fb_blank(int blank, struct fb_info *fbi)
{
@@ -1126,7 +1127,7 @@ static int mx3fb_blank(int blank, struct fb_info *fbi)
/**
* mx3fb_pan_display() - pan or wrap the display
* @var: variable screen buffer information.
- * @info: framebuffer information pointer.
+ * @fbi: framebuffer information pointer.
*
* We look only at xoffset, yoffset and the FB_VMODE_YWRAP flag
*/
@@ -1387,6 +1388,8 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi)
/**
* mx3fb_init_fbinfo() - initialize framebuffer information object.
+ * @dev: the device
+ * @ops: framebuffer device operations
* @return: initialized framebuffer structure.
*/
static struct fb_info *mx3fb_init_fbinfo(struct device *dev,
diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c
index 09a20d4ab35f..c0f4f402da3f 100644
--- a/drivers/video/fbdev/neofb.c
+++ b/drivers/video/fbdev/neofb.c
@@ -1843,7 +1843,6 @@ static int neo_init_hw(struct fb_info *info)
struct neofb_par *par = info->par;
int videoRam = 896;
int maxClock = 65000;
- int CursorMem = 1024;
int CursorOff = 0x100;
DBG("neo_init_hw");
@@ -1895,19 +1894,16 @@ static int neo_init_hw(struct fb_info *info)
case FB_ACCEL_NEOMAGIC_NM2070:
case FB_ACCEL_NEOMAGIC_NM2090:
case FB_ACCEL_NEOMAGIC_NM2093:
- CursorMem = 2048;
CursorOff = 0x100;
break;
case FB_ACCEL_NEOMAGIC_NM2097:
case FB_ACCEL_NEOMAGIC_NM2160:
- CursorMem = 1024;
CursorOff = 0x100;
break;
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
- CursorMem = 1024;
CursorOff = 0x1000;
par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase;
diff --git a/drivers/video/fbdev/nvidia/nv_setup.c b/drivers/video/fbdev/nvidia/nv_setup.c
index 2fa68669613a..5404017e6957 100644
--- a/drivers/video/fbdev/nvidia/nv_setup.c
+++ b/drivers/video/fbdev/nvidia/nv_setup.c
@@ -89,9 +89,8 @@ u8 NVReadSeq(struct nvidia_par *par, u8 index)
}
void NVWriteAttr(struct nvidia_par *par, u8 index, u8 value)
{
- volatile u8 tmp;
- tmp = VGA_RD08(par->PCIO, par->IOBase + 0x0a);
+ VGA_RD08(par->PCIO, par->IOBase + 0x0a);
if (par->paletteEnabled)
index &= ~0x20;
else
@@ -101,9 +100,7 @@ void NVWriteAttr(struct nvidia_par *par, u8 index, u8 value)
}
u8 NVReadAttr(struct nvidia_par *par, u8 index)
{
- volatile u8 tmp;
-
- tmp = VGA_RD08(par->PCIO, par->IOBase + 0x0a);
+ VGA_RD08(par->PCIO, par->IOBase + 0x0a);
if (par->paletteEnabled)
index &= ~0x20;
else
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/Kconfig b/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
index 744416dc530e..3ca1bd7bb92f 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
+++ b/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
@@ -43,6 +43,7 @@ config FB_OMAP2_PANEL_DPI
config FB_OMAP2_PANEL_DSI_CM
tristate "Generic DSI Command Mode Panel"
depends on BACKLIGHT_CLASS_DEVICE
+ depends on DRM_PANEL_DSI_CM = n
help
Driver for generic DSI command mode panels.
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
index 3417618310ff..cc2ad787d493 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
@@ -75,7 +75,7 @@ static void dispc_dump_irqs(struct seq_file *s)
seq_printf(s, "irqs %d\n", stats.irq_count);
#define PIS(x) \
- seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1]);
+ seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1])
PIS(FRAMEDONE);
PIS(VSYNC);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index 6f9c25fec994..58c7aa279ab1 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -1178,13 +1178,12 @@ static int dsi_regulator_init(struct platform_device *dsidev)
static void _dsi_print_reset_status(struct platform_device *dsidev)
{
- u32 l;
int b0, b1, b2;
/* A dummy read using the SCP interface to any DSIPHY register is
* required after DSIPHY reset to complete the reset of the DSI complex
* I/O. */
- l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
+ dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
b0 = 28;
@@ -1554,7 +1553,7 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
seq_printf(s, "irqs %d\n", stats.irq_count);
#define PIS(x) \
- seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
+ seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1])
seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
PIS(VC0);
@@ -3627,7 +3626,7 @@ static int dsi_proto_config(struct platform_device *dsidev)
static void dsi_proto_timings(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
+ unsigned tlpx, tclk_zero, tclk_prepare;
unsigned tclk_pre, tclk_post;
unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
unsigned ths_trail, ths_exit;
@@ -3646,7 +3645,6 @@ static void dsi_proto_timings(struct platform_device *dsidev)
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
tlpx = FLD_GET(r, 20, 16) * 2;
- tclk_trail = FLD_GET(r, 15, 8);
tclk_zero = FLD_GET(r, 7, 0);
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
@@ -4040,7 +4038,6 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- u16 dw, dh;
dsi_perf_mark_setup(dsidev);
@@ -4049,11 +4046,8 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
dsi->framedone_callback = callback;
dsi->framedone_data = data;
- dw = dsi->timings.x_res;
- dh = dsi->timings.y_res;
-
#ifdef DSI_PERF_MEASURE
- dsi->update_bytes = dw * dh *
+ dsi->update_bytes = dsi->timings.x_res * dsi->timings.y_res *
dsi_get_pixel_size(dsi->pix_fmt) / 8;
#endif
dsi_update_screen_dispc(dsidev);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
index 726c190862d4..e6363a420933 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
@@ -679,7 +679,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config acore;
- int err, n, cts, channel_count;
+ int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
@@ -741,7 +741,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
return -EINVAL;
}
- err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+ hdmi_compute_acr(pclk, fs_nr, &n, &cts);
/* Audio clock regeneration settings */
acore.n = n;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
index eda29d3032e1..cb63bc0e92ca 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
@@ -790,7 +790,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config core_cfg;
- int err, n, cts, channel_count;
+ int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
@@ -833,7 +833,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
return -EINVAL;
}
- err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+ hdmi_compute_acr(pclk, fs_nr, &n, &cts);
core_cfg.n = n;
core_cfg.cts = cts;
diff --git a/drivers/video/fbdev/p9100.c b/drivers/video/fbdev/p9100.c
index 6da672e92643..4e88a0a195ad 100644
--- a/drivers/video/fbdev/p9100.c
+++ b/drivers/video/fbdev/p9100.c
@@ -175,7 +175,7 @@ static int p9100_setcolreg(unsigned regno,
/**
* p9100_blank - Optional function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index 27893fa139b0..c68725eebee3 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -1508,8 +1508,8 @@ static const struct fb_ops pm2fb_ops = {
*
* Initialise and allocate resource for PCI device.
*
- * @param pdev PCI device.
- * @param id PCI device ID.
+ * @pdev: PCI device.
+ * @id: PCI device ID.
*/
static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -1715,7 +1715,7 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
*
* Release all device resources.
*
- * @param pdev PCI device to clean up.
+ * @pdev: PCI device to clean up.
*/
static void pm2fb_remove(struct pci_dev *pdev)
{
@@ -1756,7 +1756,7 @@ MODULE_DEVICE_TABLE(pci, pm2fb_id_table);
#ifndef MODULE
-/**
+/*
* Parse user specified options.
*
* This is, comma-separated options following `video=pm2fb:'.
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index ce55b9d2e862..55554b0433cb 100644
--- a/drivers/video/fbdev/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
@@ -464,7 +464,7 @@ static inline void reverse_order(u32 *l)
/**
* rivafb_load_cursor_image - load cursor image to hardware
- * @data: address to monochrome bitmap (1 = foreground color, 0 = background)
+ * @data8: address to monochrome bitmap (1 = foreground color, 0 = background)
* @par: pointer to private data
* @w: width of cursor image in pixels
* @h: height of cursor image in scanlines
@@ -843,9 +843,9 @@ static void riva_update_var(struct fb_var_screeninfo *var,
/**
* rivafb_do_maximize -
* @info: pointer to fb_info object containing info for current riva board
- * @var:
- * @nom:
- * @den:
+ * @var: standard kernel fb changeable data
+ * @nom: nom
+ * @den: den
*
* DESCRIPTION:
* .
@@ -1214,7 +1214,6 @@ out:
/**
* rivafb_pan_display
* @var: standard kernel fb changeable data
- * @con: TODO
* @info: pointer to fb_info object containing info for current riva board
*
* DESCRIPTION:
diff --git a/drivers/video/fbdev/riva/riva_hw.c b/drivers/video/fbdev/riva/riva_hw.c
index bcf9c4b4de31..8b829b720064 100644
--- a/drivers/video/fbdev/riva/riva_hw.c
+++ b/drivers/video/fbdev/riva/riva_hw.c
@@ -836,17 +836,17 @@ static void nv10CalcArbitration
nv10_sim_state *arb
)
{
- int data, pagemiss, cas,width, video_enable, bpp;
- int nvclks, mclks, pclks, vpagemiss, crtpagemiss, vbs;
- int nvclk_fill, us_extra;
+ int data, pagemiss, width, video_enable, bpp;
+ int nvclks, mclks, pclks, vpagemiss, crtpagemiss;
+ int nvclk_fill;
int found, mclk_extra, mclk_loop, cbs, m1;
int mclk_freq, pclk_freq, nvclk_freq, mp_enable;
- int us_m, us_m_min, us_n, us_p, video_drain_rate, crtc_drain_rate;
- int vus_m, vus_n, vus_p;
- int vpm_us, us_video, vlwm, cpm_us, us_crt,clwm;
+ int us_m, us_m_min, us_n, us_p, crtc_drain_rate;
+ int vus_m;
+ int vpm_us, us_video, cpm_us, us_crt,clwm;
int clwm_rnd_down;
- int craw, m2us, us_pipe, us_pipe_min, vus_pipe, p1clk, p2;
- int pclks_2_top_fifo, min_mclk_extra;
+ int m2us, us_pipe_min, p1clk, p2;
+ int min_mclk_extra;
int us_min_mclk_extra;
fifo->valid = 1;
@@ -854,16 +854,13 @@ static void nv10CalcArbitration
mclk_freq = arb->mclk_khz;
nvclk_freq = arb->nvclk_khz;
pagemiss = arb->mem_page_miss;
- cas = arb->mem_latency;
width = arb->memory_width/64;
video_enable = arb->enable_video;
bpp = arb->pix_bpp;
mp_enable = arb->enable_mp;
clwm = 0;
- vlwm = 1024;
cbs = 512;
- vbs = 512;
pclks = 4; /* lwm detect. */
@@ -924,17 +921,11 @@ static void nv10CalcArbitration
us_min_mclk_extra = min_mclk_extra *1000*1000 / mclk_freq;
us_n = nvclks*1000*1000 / nvclk_freq;/* nvclk latency in us */
us_p = pclks*1000*1000 / pclk_freq;/* nvclk latency in us */
- us_pipe = us_m + us_n + us_p;
us_pipe_min = us_m_min + us_n + us_p;
- us_extra = 0;
vus_m = mclk_loop *1000*1000 / mclk_freq; /* Mclk latency in us */
- vus_n = (4)*1000*1000 / nvclk_freq;/* nvclk latency in us */
- vus_p = 0*1000*1000 / pclk_freq;/* pclk latency in us */
- vus_pipe = vus_m + vus_n + vus_p;
if(video_enable) {
- video_drain_rate = pclk_freq * 4; /* MB/s */
crtc_drain_rate = pclk_freq * bpp/8; /* MB/s */
vpagemiss = 1; /* self generating page miss */
@@ -993,7 +984,6 @@ static void nv10CalcArbitration
else if(crtc_drain_rate * 100 >= nvclk_fill * 98) {
clwm = 1024;
cbs = 512;
- us_extra = (cbs * 1000 * 1000)/ (8*width)/mclk_freq ;
}
}
}
@@ -1010,7 +1000,6 @@ static void nv10CalcArbitration
m1 = clwm + cbs - 1024; /* Amount of overfill */
m2us = us_pipe_min + us_min_mclk_extra;
- pclks_2_top_fifo = (1024-clwm)/(8*width);
/* pclk cycles to drain */
p1clk = m2us * pclk_freq/(1000*1000);
@@ -1038,7 +1027,6 @@ static void nv10CalcArbitration
min_mclk_extra--;
}
}
- craw = clwm;
if(clwm < (1024-cbs+8)) clwm = 1024-cbs+8;
data = (int)(clwm);
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index 4541afcf9386..d1b5f965bc96 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -45,7 +45,7 @@
#if 0
#define dbg(fmt, args...) do { printk(KERN_INFO fmt, ## args); } while(0)
#else
-#define dbg(fmt, args...) do { } while (0)
+#define dbg(fmt, args...) do { no_printk(KERN_INFO fmt, ## args); } while (0)
#endif
/*
@@ -512,7 +512,6 @@ s1d13xxxfb_bitblt_copyarea(struct fb_info *info, const struct fb_copyarea *area)
}
/**
- *
* s1d13xxxfb_bitblt_solidfill - accelerated solidfill function
* @info : framebuffer structure
* @rect : fb_fillrect structure
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index ba316bd56efd..3b134e1bbc38 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -75,6 +75,7 @@ struct s3c_fb;
* @buf_size: Offset of buffer size registers.
* @buf_end: Offset of buffer end registers.
* @osd: The base for the OSD registers.
+ * @osd_stride: stride of osd
* @palette: Address of palette memory, or 0 if none.
* @has_prtcon: Set if has PRTCON register.
* @has_shadowcon: Set if has SHADOWCON register.
@@ -155,7 +156,7 @@ struct s3c_fb_palette {
* @windata: The platform data supplied for the window configuration.
* @parent: The hardware that this window is part of.
* @fbinfo: Pointer pack to the framebuffer info for this window.
- * @varint: The variant information for this window.
+ * @variant: The variant information for this window.
* @palette_buffer: Buffer/cache to hold palette entries.
* @pseudo_palette: For use in TRUECOLOUR modes for entries 0..15/
* @index: The window number of this window.
@@ -336,7 +337,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
/**
* s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock.
* @sfb: The hardware state.
- * @pixclock: The pixel clock wanted, in picoseconds.
+ * @pixclk: The pixel clock wanted, in picoseconds.
*
* Given the specified pixel clock, work out the necessary divider to get
* close to the output frequency.
@@ -733,7 +734,7 @@ static inline unsigned int chan_to_field(unsigned int chan,
* @red: The red field for the palette data.
* @green: The green field for the palette data.
* @blue: The blue field for the palette data.
- * @trans: The transparency (alpha) field for the palette data.
+ * @transp: The transparency (alpha) field for the palette data.
* @info: The framebuffer being changed.
*/
static int s3c_fb_setcolreg(unsigned regno,
@@ -1133,6 +1134,7 @@ static void s3c_fb_free_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
/**
* s3c_fb_release_win() - release resources for a framebuffer window.
+ * @sfb: The base resources for the hardware.
* @win: The window to cleanup the resources for.
*
* Release the resources that where claimed for the hardware window,
@@ -1160,6 +1162,7 @@ static void s3c_fb_release_win(struct s3c_fb *sfb, struct s3c_fb_win *win)
/**
* s3c_fb_probe_win() - register an hardware window
* @sfb: The base resources for the hardware
+ * @win_no: The window number
* @variant: The variant information for this window.
* @res: Pointer to where to place the resultant window.
*
@@ -1170,7 +1173,6 @@ static int s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
struct s3c_fb_win_variant *variant,
struct s3c_fb_win **res)
{
- struct fb_var_screeninfo *var;
struct fb_videomode initmode;
struct s3c_fb_pd_win *windata;
struct s3c_fb_win *win;
@@ -1198,7 +1200,6 @@ static int s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
win = fbinfo->par;
*res = win;
- var = &fbinfo->var;
win->variant = *variant;
win->fbinfo = fbinfo;
win->parent = sfb;
diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
index fde27feae5d0..b568c646a76c 100644
--- a/drivers/video/fbdev/sis/init.c
+++ b/drivers/video/fbdev/sis/init.c
@@ -2648,7 +2648,7 @@ static void
SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
unsigned short ModeIdIndex, unsigned short RRTI)
{
- unsigned short data, infoflag = 0, modeflag, resindex;
+ unsigned short data, infoflag = 0, modeflag;
#ifdef CONFIG_FB_SIS_315
unsigned char *ROMAddr = SiS_Pr->VirtualRomBase;
unsigned short data2, data3;
@@ -2659,7 +2659,6 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
if(SiS_Pr->UseCustomMode) {
infoflag = SiS_Pr->CInfoFlag;
} else {
- resindex = SiS_GetResInfo(SiS_Pr, ModeNo, ModeIdIndex);
if(ModeNo > 0x13) {
infoflag = SiS_Pr->SiS_RefIndex[RRTI].Ext_InfoFlag;
}
@@ -3538,17 +3537,13 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
struct fb_var_screeninfo *var, bool writeres
)
{
- unsigned short HRE, HBE, HRS, HBS, HDE, HT;
- unsigned short VRE, VBE, VRS, VBS, VDE, VT;
- unsigned char sr_data, cr_data, cr_data2;
- int A, B, C, D, E, F, temp;
+ unsigned short HRE, HBE, HRS, HDE;
+ unsigned short VRE, VBE, VRS, VDE;
+ unsigned char sr_data, cr_data;
+ int B, C, D, E, F, temp;
sr_data = crdata[14];
- /* Horizontal total */
- HT = crdata[0] | ((unsigned short)(sr_data & 0x03) << 8);
- A = HT + 5;
-
/* Horizontal display enable end */
HDE = crdata[1] | ((unsigned short)(sr_data & 0x0C) << 6);
E = HDE + 1;
@@ -3557,9 +3552,6 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
HRS = crdata[4] | ((unsigned short)(sr_data & 0xC0) << 2);
F = HRS - E - 3;
- /* Horizontal blank start */
- HBS = crdata[2] | ((unsigned short)(sr_data & 0x30) << 4);
-
sr_data = crdata[15];
cr_data = crdata[5];
@@ -3588,13 +3580,6 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
sr_data = crdata[13];
cr_data = crdata[7];
- /* Vertical total */
- VT = crdata[6] |
- ((unsigned short)(cr_data & 0x01) << 8) |
- ((unsigned short)(cr_data & 0x20) << 4) |
- ((unsigned short)(sr_data & 0x01) << 10);
- A = VT + 2;
-
/* Vertical display enable end */
VDE = crdata[10] |
((unsigned short)(cr_data & 0x02) << 7) |
@@ -3609,14 +3594,6 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
((unsigned short)(sr_data & 0x08) << 7);
F = VRS + 1 - E;
- cr_data2 = (crdata[16] & 0x01) << 5;
-
- /* Vertical blank start */
- VBS = crdata[11] |
- ((unsigned short)(cr_data & 0x08) << 5) |
- ((unsigned short)(cr_data2 & 0x20) << 4) |
- ((unsigned short)(sr_data & 0x04) << 8);
-
/* Vertical blank end */
VBE = crdata[12] | ((unsigned short)(sr_data & 0x10) << 4);
temp = VBE - ((E - 1) & 511);
diff --git a/drivers/video/fbdev/sis/oem310.h b/drivers/video/fbdev/sis/oem310.h
index 8fce56e4482c..ed28755715ce 100644
--- a/drivers/video/fbdev/sis/oem310.h
+++ b/drivers/video/fbdev/sis/oem310.h
@@ -200,6 +200,7 @@ static const unsigned char SiS310_TVDelayCompensation_651302LV[] = /* M650, 651,
0x33,0x33
};
+#if 0 /* Not used */
static const unsigned char SiS_TVDelay661_301[] = /* 661, 301 */
{
0x44,0x44,
@@ -219,6 +220,7 @@ static const unsigned char SiS_TVDelay661_301B[] = /* 661, 301B et al */
0x44,0x44,
0x44,0x44
};
+#endif
static const unsigned char SiS310_TVDelayCompensation_LVDS[] = /* LVDS */
{
diff --git a/drivers/video/fbdev/sis/sis.h b/drivers/video/fbdev/sis/sis.h
index 9f4c3093ccb3..d632f096083b 100644
--- a/drivers/video/fbdev/sis/sis.h
+++ b/drivers/video/fbdev/sis/sis.h
@@ -15,7 +15,6 @@
#include "vgatypes.h"
#include "vstruct.h"
-#include "init.h"
#define VER_MAJOR 1
#define VER_MINOR 8
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 03c736f6f3d0..266a5582f94d 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -5029,7 +5029,6 @@ static void sisfb_post_xgi_ddr2(struct sis_video_info *ivideo, u8 regb)
static const u8 cs168[8] = {
0x48, 0x78, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00
};
- u8 reg;
u8 v1;
u8 v2;
u8 v3;
@@ -5037,9 +5036,9 @@ static void sisfb_post_xgi_ddr2(struct sis_video_info *ivideo, u8 regb)
SiS_SetReg(SISCR, 0xb0, 0x80); /* DDR2 dual frequency mode */
SiS_SetReg(SISCR, 0x82, 0x77);
SiS_SetReg(SISCR, 0x86, 0x00);
- reg = SiS_GetReg(SISCR, 0x86);
+ SiS_GetReg(SISCR, 0x86);
SiS_SetReg(SISCR, 0x86, 0x88);
- reg = SiS_GetReg(SISCR, 0x86);
+ SiS_GetReg(SISCR, 0x86);
v1 = cs168[regb]; v2 = cs160[regb]; v3 = cs158[regb];
if (ivideo->haveXGIROM) {
v1 = bios[regb + 0x168];
@@ -5049,9 +5048,9 @@ static void sisfb_post_xgi_ddr2(struct sis_video_info *ivideo, u8 regb)
SiS_SetReg(SISCR, 0x86, v1);
SiS_SetReg(SISCR, 0x82, 0x77);
SiS_SetReg(SISCR, 0x85, 0x00);
- reg = SiS_GetReg(SISCR, 0x85);
+ SiS_GetReg(SISCR, 0x85);
SiS_SetReg(SISCR, 0x85, 0x88);
- reg = SiS_GetReg(SISCR, 0x85);
+ SiS_GetReg(SISCR, 0x85);
SiS_SetReg(SISCR, 0x85, v2);
SiS_SetReg(SISCR, 0x82, v3);
SiS_SetReg(SISCR, 0x98, 0x01);
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index c05cdabeb11c..27d4b0ace2d6 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -1390,7 +1390,7 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
fix->smem_start, info->screen_base,
fix->smem_len >> 20);
- f_ddprintk("regbase_virt: %#lx\n", par->mmio_vbase);
+ f_ddprintk("regbase_virt: %p\n", par->mmio_vbase);
f_ddprintk("membase_phys: %#lx\n", fix->smem_start);
f_ddprintk("fbbase_virt: %p\n", info->screen_base);
diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c
index 34b2e5b6e84a..1638a40fed22 100644
--- a/drivers/video/fbdev/tcx.c
+++ b/drivers/video/fbdev/tcx.c
@@ -196,7 +196,7 @@ static int tcx_setcolreg(unsigned regno,
/**
* tcx_blank - Optional function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index f056d80f6359..67e37a62b07c 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -206,9 +206,7 @@ static inline u8 crt_inb(struct tdfx_par *par, u32 idx)
static inline void att_outb(struct tdfx_par *par, u32 idx, u8 val)
{
- unsigned char tmp;
-
- tmp = vga_inb(par, IS1_R);
+ vga_inb(par, IS1_R);
vga_outb(par, ATT_IW, idx);
vga_outb(par, ATT_IW, val);
}
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index 666fbe2f671c..ae0cf5540636 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -555,7 +555,7 @@ tgafb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue,
/**
* tgafb_blank - Optional function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int
@@ -837,7 +837,7 @@ tgafb_clut_imageblit(struct fb_info *info, const struct fb_image *image)
u32 *palette = ((u32 *)info->pseudo_palette);
unsigned long pos, line_length, i, j;
const unsigned char *data;
- void __iomem *regs_base, *fb_base;
+ void __iomem *fb_base;
dx = image->dx;
dy = image->dy;
@@ -855,7 +855,6 @@ tgafb_clut_imageblit(struct fb_info *info, const struct fb_image *image)
if (dy + height > vyres)
height = vyres - dy;
- regs_base = par->tga_regs_base;
fb_base = par->tga_fb_base;
pos = dy * line_length + (dx * 4);
@@ -1034,7 +1033,7 @@ tgafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
regs_base + TGA_MODE_REG);
}
-/**
+/*
* tgafb_copyarea - REQUIRED function. Can use generic routines if
* non acclerated hardware and packed pixel based.
* Copies on area of the screen to another area.
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index f9b3c1cb9530..b9cdd02c1000 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1017,6 +1017,7 @@ static void dlfb_ops_destroy(struct fb_info *info)
}
vfree(dlfb->backing_buffer);
kfree(dlfb->edid);
+ dlfb_free_urb_list(dlfb);
usb_put_dev(dlfb->udev);
kfree(dlfb);
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index def14ac0ebe1..4df6772802d7 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -423,7 +423,7 @@ static int uvesafb_vbe_getinfo(struct uvesafb_ktask *task,
task->t.flags = TF_VBEIB;
task->t.buf_len = sizeof(struct vbe_ib);
task->buf = &par->vbe_ib;
- strncpy(par->vbe_ib.vbe_signature, "VBE2", 4);
+ memcpy(par->vbe_ib.vbe_signature, "VBE2", 4);
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
@@ -560,6 +560,8 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
task->t.regs.eax = 0x4f0a;
task->t.regs.ebx = 0x0;
err = uvesafb_exec(task);
+ if (err)
+ return err;
if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
par->pmi_setpal = par->ypan = 0;
@@ -1871,7 +1873,7 @@ static ssize_t v86d_show(struct device_driver *dev, char *buf)
static ssize_t v86d_store(struct device_driver *dev, const char *buf,
size_t count)
{
- strncpy(v86d_path, buf, PATH_MAX);
+ strncpy(v86d_path, buf, PATH_MAX - 1);
return count;
}
static DRIVER_ATTR_RW(v86d);
diff --git a/drivers/video/fbdev/via/lcd.c b/drivers/video/fbdev/via/lcd.c
index 4a869402d120..088b962076b5 100644
--- a/drivers/video/fbdev/via/lcd.c
+++ b/drivers/video/fbdev/via/lcd.c
@@ -537,11 +537,9 @@ void viafb_lcd_set_mode(const struct fb_var_screeninfo *var, u16 cxres,
u32 clock;
struct via_display_timing timing;
struct fb_var_screeninfo panel_var;
- const struct fb_videomode *mode_crt_table, *panel_crt_table;
+ const struct fb_videomode *panel_crt_table;
DEBUG_MSG(KERN_INFO "viafb_lcd_set_mode!!\n");
- /* Get mode table */
- mode_crt_table = viafb_get_best_mode(set_hres, set_vres, 60);
/* Get panel table Pointer */
panel_crt_table = viafb_get_best_mode(panel_hres, panel_vres, 60);
viafb_fill_var_timing_info(&panel_var, panel_crt_table);
diff --git a/drivers/video/fbdev/wmt_ge_rops.c b/drivers/video/fbdev/wmt_ge_rops.c
index 2445cfe617a9..42255d27a1db 100644
--- a/drivers/video/fbdev/wmt_ge_rops.c
+++ b/drivers/video/fbdev/wmt_ge_rops.c
@@ -11,6 +11,7 @@
#include <linux/fb.h>
#include <linux/platform_device.h>
#include "core/fb_draw.h"
+#include "wmt_ge_rops.h"
#define GE_COMMAND_OFF 0x00
#define GE_DEPTH_OFF 0x04
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index abc9ada798ee..f93b6abbe258 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -52,6 +52,7 @@ static int parse_timing_property(const struct device_node *np, const char *name,
/**
* of_parse_display_timing - parse display_timing entry from device_node
* @np: device_node with the properties
+ * @dt: display_timing that contains the result. It may be partially written in case of errors
**/
static int of_parse_display_timing(const struct device_node *np,
struct display_timing *dt)
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
index 67aff2421c29..e7d10ffd3b66 100644
--- a/drivers/video/of_videomode.c
+++ b/drivers/video/of_videomode.c
@@ -14,9 +14,9 @@
/**
* of_get_videomode - get the videomode #<index> from devicetree
- * @np - devicenode with the display_timings
- * @vm - set to return value
- * @index - index into list of display_timings
+ * @np: devicenode with the display_timings
+ * @vm: set to return value
+ * @index: index into list of display_timings
* (Set this to OF_USE_NATIVE_MODE to use whatever mode is
* specified as native mode in the DT.)
*
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index a8030332a191..e850f79351cb 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -2060,16 +2060,6 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
.irq_ack = ack_dynirq,
};
-int xen_set_callback_via(uint64_t via)
-{
- struct xen_hvm_param a;
- a.domid = DOMID_SELF;
- a.index = HVM_PARAM_CALLBACK_IRQ;
- a.value = via;
- return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
-}
-EXPORT_SYMBOL_GPL(xen_set_callback_via);
-
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
* channel notifications because we can receive vector callbacks on any
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index dd911e1ff782..18f0ed8b1f93 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -132,6 +132,13 @@ static int platform_pci_probe(struct pci_dev *pdev,
dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
goto out;
}
+ /*
+ * It doesn't strictly *have* to run on CPU0 but it sure
+ * as hell better process the event channel ports delivered
+ * to CPU0.
+ */
+ irq_set_affinity(pdev->irq, cpumask_of(0));
+
callback_via = get_callback_via(pdev);
ret = xen_set_callback_via(callback_via);
if (ret) {
@@ -149,7 +156,6 @@ static int platform_pci_probe(struct pci_dev *pdev,
ret = gnttab_init();
if (ret)
goto grant_out;
- xenbus_probe(NULL);
return 0;
grant_out:
gnttab_free_auto_xlat_frames();
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index b0c73c58f987..720a7b7abd46 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -717,14 +717,15 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
return 0;
}
-static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
+static long privcmd_ioctl_mmap_resource(struct file *file,
+ struct privcmd_mmap_resource __user *udata)
{
struct privcmd_data *data = file->private_data;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct privcmd_mmap_resource kdata;
xen_pfn_t *pfns = NULL;
- struct xen_mem_acquire_resource xdata;
+ struct xen_mem_acquire_resource xdata = { };
int rc;
if (copy_from_user(&kdata, udata, sizeof(kdata)))
@@ -734,6 +735,22 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
return -EPERM;
+ /* Both fields must be set or unset */
+ if (!!kdata.addr != !!kdata.num)
+ return -EINVAL;
+
+ xdata.domid = kdata.dom;
+ xdata.type = kdata.type;
+ xdata.id = kdata.id;
+
+ if (!kdata.addr && !kdata.num) {
+ /* Query the size of the resource. */
+ rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
+ if (rc)
+ return rc;
+ return __put_user(xdata.nr_frames, &udata->num);
+ }
+
mmap_write_lock(mm);
vma = find_vma(mm, kdata.addr);
@@ -768,10 +785,6 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
} else
vma->vm_private_data = PRIV_VMA_LOCKED;
- memset(&xdata, 0, sizeof(xdata));
- xdata.domid = kdata.dom;
- xdata.type = kdata.type;
- xdata.id = kdata.id;
xdata.frame = kdata.idx;
xdata.nr_frames = kdata.num;
set_xen_guest_handle(xdata.frame_list, pfns);
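
For orientation, a hedged userspace sketch of the query mode this hunk introduces: leaving both addr and num at zero now makes the ioctl report the required frame count back through num. The header path, ioctl macro and device node below are the usual privcmd uapi and are assumptions for illustration, not taken from this patch:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <xen/privcmd.h>   /* assumed location of IOCTL_PRIVCMD_MMAP_RESOURCE */

    static long query_resource_frames(int domid, unsigned int type, unsigned int id)
    {
            struct privcmd_mmap_resource res;
            long nr = -1;
            int fd = open("/dev/xen/privcmd", O_RDWR);   /* assumed device node */

            if (fd < 0)
                    return -1;
            memset(&res, 0, sizeof(res));
            res.dom = domid;
            res.type = type;
            res.id = id;
            /* addr == 0 and num == 0 selects the new "query size" path */
            if (ioctl(fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &res) == 0)
                    nr = (long)res.num;   /* kernel wrote nr_frames back here */
            close(fd);
            return nr;
    }
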
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
index 2a93b7c9c159..dc1537335414 100644
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -115,6 +115,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
const char *nodename);
int xenbus_probe_devices(struct xen_bus_type *bus);
+void xenbus_probe(void);
void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index eb5151fc8efa..e5fda0256feb 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -57,16 +57,8 @@ DEFINE_MUTEX(xs_response_mutex);
static int xenbus_irq;
static struct task_struct *xenbus_task;
-static DECLARE_WORK(probe_work, xenbus_probe);
-
-
static irqreturn_t wake_waiting(int irq, void *unused)
{
- if (unlikely(xenstored_ready == 0)) {
- xenstored_ready = 1;
- schedule_work(&probe_work);
- }
-
wake_up(&xb_waitq);
return IRQ_HANDLED;
}
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 44634d970a5c..c8f0282bb649 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -683,29 +683,76 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
-void xenbus_probe(struct work_struct *unused)
+void xenbus_probe(void)
{
xenstored_ready = 1;
+ /*
+ * In the HVM case, xenbus_init() deferred its call to
+ * xs_init() in case callbacks were not operational yet.
+ * So do it now.
+ */
+ if (xen_store_domain_type == XS_HVM)
+ xs_init();
+
/* Notify others that xenstore is up */
blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
}
-EXPORT_SYMBOL_GPL(xenbus_probe);
-static int __init xenbus_probe_initcall(void)
+/*
+ * Returns true when XenStore init must be deferred in order to
+ * allow the PCI platform device to be initialised, before we
+ * can actually have event channel interrupts working.
+ */
+static bool xs_hvm_defer_init_for_callback(void)
{
- if (!xen_domain())
- return -ENODEV;
+#ifdef CONFIG_XEN_PVHVM
+ return xen_store_domain_type == XS_HVM &&
+ !xen_have_vector_callback;
+#else
+ return false;
+#endif
+}
- if (xen_initial_domain() || xen_hvm_domain())
- return 0;
+static int __init xenbus_probe_initcall(void)
+{
+ /*
+ * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
+ * need to wait for the platform PCI device to come up.
+ */
+ if (xen_store_domain_type == XS_PV ||
+ (xen_store_domain_type == XS_HVM &&
+ !xs_hvm_defer_init_for_callback()))
+ xenbus_probe();
- xenbus_probe(NULL);
return 0;
}
-
device_initcall(xenbus_probe_initcall);
+int xen_set_callback_via(uint64_t via)
+{
+ struct xen_hvm_param a;
+ int ret;
+
+ a.domid = DOMID_SELF;
+ a.index = HVM_PARAM_CALLBACK_IRQ;
+ a.value = via;
+
+ ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
+ if (ret)
+ return ret;
+
+ /*
+ * If xenbus_probe_initcall() deferred the xenbus_probe()
+ * due to the callback not functioning yet, we can do it now.
+ */
+ if (!xenstored_ready && xs_hvm_defer_init_for_callback())
+ xenbus_probe();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xen_set_callback_via);
+
/* Set up event channel for xenstored which is run as a local process
* (this is normally used only in dom0)
*/
@@ -818,11 +865,17 @@ static int __init xenbus_init(void)
break;
}
- /* Initialize the interface to xenstore. */
- err = xs_init();
- if (err) {
- pr_warn("Error initializing xenstore comms: %i\n", err);
- goto out_error;
+ /*
+ * HVM domains may not have a functional callback yet. In that
+ * case let xs_init() be called from xenbus_probe(), which will
+ * get invoked at an appropriate time.
+ */
+ if (xen_store_domain_type != XS_HVM) {
+ err = xs_init();
+ if (err) {
+ pr_warn("Error initializing xenstore comms: %i\n", err);
+ goto out_error;
+ }
}
if ((xen_store_domain_type != XS_LOCAL) &&
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 9068d5578a26..7bd659ad959e 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -350,7 +350,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
unsigned blkoff)
{
union afs_xdr_dirent *dire;
- unsigned offset, next, curr;
+ unsigned offset, next, curr, nr_slots;
size_t nlen;
int tmp;
@@ -363,13 +363,12 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
offset < AFS_DIR_SLOTS_PER_BLOCK;
offset = next
) {
- next = offset + 1;
-
/* skip entries marked unused in the bitmap */
if (!(block->hdr.bitmap[offset / 8] &
(1 << (offset % 8)))) {
_debug("ENT[%zu.%u]: unused",
blkoff / sizeof(union afs_xdr_dir_block), offset);
+ next = offset + 1;
if (offset >= curr)
ctx->pos = blkoff +
next * sizeof(union afs_xdr_dirent);
@@ -381,35 +380,39 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
nlen = strnlen(dire->u.name,
sizeof(*block) -
offset * sizeof(union afs_xdr_dirent));
+ if (nlen > AFSNAMEMAX - 1) {
+ _debug("ENT[%zu]: name too long (len %u/%zu)",
+ blkoff / sizeof(union afs_xdr_dir_block),
+ offset, nlen);
+ return afs_bad(dvnode, afs_file_error_dir_name_too_long);
+ }
_debug("ENT[%zu.%u]: %s %zu \"%s\"",
blkoff / sizeof(union afs_xdr_dir_block), offset,
(offset < curr ? "skip" : "fill"),
nlen, dire->u.name);
- /* work out where the next possible entry is */
- for (tmp = nlen; tmp > 15; tmp -= sizeof(union afs_xdr_dirent)) {
- if (next >= AFS_DIR_SLOTS_PER_BLOCK) {
- _debug("ENT[%zu.%u]:"
- " %u travelled beyond end dir block"
- " (len %u/%zu)",
- blkoff / sizeof(union afs_xdr_dir_block),
- offset, next, tmp, nlen);
- return afs_bad(dvnode, afs_file_error_dir_over_end);
- }
- if (!(block->hdr.bitmap[next / 8] &
- (1 << (next % 8)))) {
- _debug("ENT[%zu.%u]:"
- " %u unmarked extension (len %u/%zu)",
+ nr_slots = afs_dir_calc_slots(nlen);
+ next = offset + nr_slots;
+ if (next > AFS_DIR_SLOTS_PER_BLOCK) {
+ _debug("ENT[%zu.%u]:"
+ " %u extends beyond end dir block"
+ " (len %zu)",
+ blkoff / sizeof(union afs_xdr_dir_block),
+ offset, next, nlen);
+ return afs_bad(dvnode, afs_file_error_dir_over_end);
+ }
+
+ /* Check that the name-extension dirents are all allocated */
+ for (tmp = 1; tmp < nr_slots; tmp++) {
+ unsigned int ix = offset + tmp;
+ if (!(block->hdr.bitmap[ix / 8] & (1 << (ix % 8)))) {
+ _debug("ENT[%zu.u]:"
+ " %u unmarked extension (%u/%u)",
blkoff / sizeof(union afs_xdr_dir_block),
- offset, next, tmp, nlen);
+ offset, tmp, nr_slots);
return afs_bad(dvnode, afs_file_error_dir_unmarked_ext);
}
-
- _debug("ENT[%zu.%u]: ext %u/%zu",
- blkoff / sizeof(union afs_xdr_dir_block),
- next, tmp, nlen);
- next++;
}
/* skip if starts before the current position */
diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
index 2ffe09abae7f..f4600c1353ad 100644
--- a/fs/afs/dir_edit.c
+++ b/fs/afs/dir_edit.c
@@ -215,8 +215,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
}
/* Work out how many slots we're going to need. */
- need_slots = round_up(12 + name->len + 1 + 4, AFS_DIR_DIRENT_SIZE);
- need_slots /= AFS_DIR_DIRENT_SIZE;
+ need_slots = afs_dir_calc_slots(name->len);
meta_page = kmap(page0);
meta = &meta_page->blocks[0];
@@ -393,8 +392,7 @@ void afs_edit_dir_remove(struct afs_vnode *vnode,
}
/* Work out how many slots we're going to discard. */
- need_slots = round_up(12 + name->len + 1 + 4, AFS_DIR_DIRENT_SIZE);
- need_slots /= AFS_DIR_DIRENT_SIZE;
+ need_slots = afs_dir_calc_slots(name->len);
meta_page = kmap(page0);
meta = &meta_page->blocks[0];
diff --git a/fs/afs/xdr_fs.h b/fs/afs/xdr_fs.h
index 94f1f398eefa..8ca868164507 100644
--- a/fs/afs/xdr_fs.h
+++ b/fs/afs/xdr_fs.h
@@ -54,10 +54,16 @@ union afs_xdr_dirent {
__be16 hash_next;
__be32 vnode;
__be32 unique;
- u8 name[16];
- u8 overflow[4]; /* if any char of the name (inc
- * NUL) reaches here, consume
- * the next dirent too */
+ u8 name[];
+ /* When determining the number of dirent slots needed to
+ * represent a directory entry, name should be assumed to be 16
+ * bytes, due to a now-standardised (mis)calculation, but it is
+ * in fact 20 bytes in size. afs_dir_calc_slots() should be
+ * used for this.
+ *
+ * For names longer than (16 or) 20 bytes, extra slots should
+ * be annexed to this one using the extended_name format.
+ */
} u;
u8 extended_name[32];
} __packed;
@@ -96,4 +102,15 @@ struct afs_xdr_dir_page {
union afs_xdr_dir_block blocks[AFS_DIR_BLOCKS_PER_PAGE];
};
+/*
+ * Calculate the number of dirent slots required for any given name length.
+ * The calculation is made assuming the part of the name in the first slot is
+ * 16 bytes, rather than 20, but this miscalculation is now standardised.
+ */
+static inline unsigned int afs_dir_calc_slots(size_t name_len)
+{
+ name_len++; /* NUL-terminated */
+ return 1 + ((name_len + 15) / AFS_DIR_DIRENT_SIZE);
+}
+
#endif /* XDR_FS_H */
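
As a quick sanity check on the slot arithmetic above, here is a standalone sketch of the same calculation. It assumes a 32-byte dirent (the value of AFS_DIR_DIRENT_SIZE in this header); the helper and macro names are for illustration only:

    #include <stdio.h>
    #include <stddef.h>

    #define DIRENT_SIZE 32  /* assumed value of AFS_DIR_DIRENT_SIZE */

    /* Mirrors afs_dir_calc_slots(): the first slot nominally holds 16 name
     * bytes, every extension slot holds a further DIRENT_SIZE bytes. */
    static unsigned int calc_slots(size_t name_len)
    {
            name_len++;     /* include the NUL terminator */
            return 1 + ((name_len + 15) / DIRENT_SIZE);
    }

    int main(void)
    {
            printf("%u\n", calc_slots(15)); /* 1 - fits the nominal 16-byte first slot */
            printf("%u\n", calc_slots(16)); /* 2 - the NUL spills into an extension slot */
            printf("%u\n", calc_slots(48)); /* 3 - base slot plus two 32-byte extensions */
            return 0;
    }
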
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 9293045e128c..3b8963e228a1 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -605,6 +605,8 @@ int thaw_bdev(struct block_device *bdev)
error = thaw_super(sb);
if (error)
bdev->bd_fsfreeze_count++;
+ else
+ bdev->bd_fsfreeze_sb = NULL;
out:
mutex_unlock(&bdev->bd_fsfreeze_mutex);
return error;
@@ -774,8 +776,11 @@ static struct kmem_cache * bdev_cachep __read_mostly;
static struct inode *bdev_alloc_inode(struct super_block *sb)
{
struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
+
if (!ei)
return NULL;
+ memset(&ei->bdev, 0, sizeof(ei->bdev));
+ ei->bdev.bd_bdi = &noop_backing_dev_info;
return &ei->vfs_inode;
}
@@ -869,14 +874,12 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
mapping_set_gfp_mask(&inode->i_data, GFP_USER);
bdev = I_BDEV(inode);
- memset(bdev, 0, sizeof(*bdev));
mutex_init(&bdev->bd_mutex);
mutex_init(&bdev->bd_fsfreeze_mutex);
spin_lock_init(&bdev->bd_size_lock);
bdev->bd_disk = disk;
bdev->bd_partno = partno;
bdev->bd_inode = inode;
- bdev->bd_bdi = &noop_backing_dev_info;
#ifdef CONFIG_SYSFS
INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
@@ -1055,7 +1058,6 @@ static void bd_finish_claiming(struct block_device *bdev, void *holder)
/**
* bd_abort_claiming - abort claiming of a block device
* @bdev: block device of interest
- * @whole: whole block device
* @holder: holder that has claimed @bdev
*
* Abort claiming of a block device when the exclusive open failed. This can be
@@ -1828,6 +1830,7 @@ const struct file_operations def_blk_fops = {
/**
* lookup_bdev - lookup a struct block_device by name
* @pathname: special file representing the block device
+ * @dev: return value of the block device's dev_t
*
* Get a reference to the blockdevice at @pathname in the current
* namespace if possible and return it. Return ERR_PTR(error)
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 02d7d7b2563b..9cadacf3ec27 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -3117,7 +3117,7 @@ void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
list_del_init(&lower->list);
if (lower == node)
node = NULL;
- btrfs_backref_free_node(cache, lower);
+ btrfs_backref_drop_node(cache, lower);
}
btrfs_backref_cleanup_node(cache, node);
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 52f2198d44c9..0886e81e5540 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -2669,7 +2669,8 @@ again:
* Go through delayed refs for all the stuff we've just kicked off
* and then loop back (just once)
*/
- ret = btrfs_run_delayed_refs(trans, 0);
+ if (!ret)
+ ret = btrfs_run_delayed_refs(trans, 0);
if (!ret && loops == 0) {
loops++;
spin_lock(&cur_trans->dirty_bgs_lock);
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 555cbcef6585..d9bf53d9ff90 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -42,6 +42,15 @@ enum {
* to an inode.
*/
BTRFS_INODE_NO_XATTRS,
+ /*
+ * Set when we are in a context where we need to start a transaction and
+ * have dirty pages with the respective file range locked. This is to
+ * ensure that when reserving space for the transaction, if we are low
+ * on available space and need to flush delalloc, we will not flush
+ * delalloc for this inode, because that could result in a deadlock (on
+ * the file range, inode's io_tree).
+ */
+ BTRFS_INODE_NO_DELALLOC_FLUSH,
};
/* in memory btrfs inode */
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 07810891e204..cc89b63d65a4 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2555,8 +2555,14 @@ out:
* @p: Holds all btree nodes along the search path
* @root: The root node of the tree
* @key: The key we are looking for
- * @ins_len: Indicates purpose of search, for inserts it is 1, for
- * deletions it's -1. 0 for plain searches
+ * @ins_len: Indicates purpose of search:
+ * >0 for inserts it's size of item inserted (*)
+ * <0 for deletions
+ * 0 for plain searches, not modifying the tree
+ *
+ * (*) If size of item inserted doesn't include
+ * sizeof(struct btrfs_item), then p->search_for_extension must
+ * be set.
* @cow: boolean should CoW operations be performed. Must always be 1
* when modifying the tree.
*
@@ -2717,6 +2723,20 @@ cow_done:
if (level == 0) {
p->slots[level] = slot;
+ /*
+ * Item key already exists. In this case, if we are
+ * allowed to insert the item (for example, in dir_item
+ * case, item key collision is allowed), it will be
+ * merged with the original item. Only the item size
+ * grows, no new btrfs item will be added. If
+ * search_for_extension is not set, ins_len already
+ * accounts for the size of struct btrfs_item, so deduct
+ * it here to keep the leaf space check correct.
+ */
+ if (ret == 0 && ins_len > 0 && !p->search_for_extension) {
+ ASSERT(ins_len >= sizeof(struct btrfs_item));
+ ins_len -= sizeof(struct btrfs_item);
+ }
if (ins_len > 0 &&
btrfs_leaf_free_space(b) < ins_len) {
if (write_lock_level < 1) {
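
The caller-side pattern for the new search_for_extension flag appears verbatim in the fs/btrfs/file-item.c hunk further down; condensed here as a sketch, where trans, root, key and data_size stand in for whatever the caller has at hand:

    /* Extending an item whose key already exists: ins_len carries only the
     * extra payload size, so tell btrfs_search_slot() not to add
     * sizeof(struct btrfs_item) on top of it. */
    path->search_for_extension = 1;
    ret = btrfs_search_slot(trans, root, &key, path, data_size, 1);
    path->search_for_extension = 0;
    if (ret < 0)
            goto out;
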
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 1d3c1e479f3d..e6e37591f1de 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -131,6 +131,8 @@ enum {
* defrag
*/
BTRFS_FS_STATE_REMOUNTING,
+ /* Filesystem in RO mode */
+ BTRFS_FS_STATE_RO,
/* Track if a transaction abort has been reported on this filesystem */
BTRFS_FS_STATE_TRANS_ABORTED,
/*
@@ -367,6 +369,12 @@ struct btrfs_path {
unsigned int search_commit_root:1;
unsigned int need_commit_sem:1;
unsigned int skip_release_on_error:1;
+ /*
+ * Indicate that new item (btrfs_search_slot) is extending already
+ * existing item and ins_len contains only the data size and not item
+ * header (ie. sizeof(struct btrfs_item) is not included).
+ */
+ unsigned int search_for_extension:1;
};
#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
sizeof(struct btrfs_item))
@@ -2885,10 +2893,26 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
* If we remount the fs to be R/O or umount the fs, the cleaner needn't do
* anything except sleeping. This function is used to check the status of
* the fs.
+ * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
+ * since setting and checking for SB_RDONLY in the superblock's flags is not
+ * atomic.
*/
static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
{
- return fs_info->sb->s_flags & SB_RDONLY || btrfs_fs_closing(fs_info);
+ return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
+ btrfs_fs_closing(fs_info);
+}
+
+static inline void btrfs_set_sb_rdonly(struct super_block *sb)
+{
+ sb->s_flags |= SB_RDONLY;
+ set_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
+}
+
+static inline void btrfs_clear_sb_rdonly(struct super_block *sb)
+{
+ sb->s_flags &= ~SB_RDONLY;
+ clear_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
}
/* tree mod log functions from ctree.c */
@@ -3073,7 +3097,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
u32 min_type);
int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
-int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr);
+int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
+ bool in_reclaim_context);
int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
unsigned int extra_bits,
struct extent_state **cached_state);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index a98e33f232d5..324f646d6e5e 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -715,7 +715,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
* flush all outstanding I/O and inode extent mappings before the
* copy operation is declared as being finished
*/
- ret = btrfs_start_delalloc_roots(fs_info, U64_MAX);
+ ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false);
if (ret) {
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return ret;
diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
index 1db966bf85b2..2b8383d41144 100644
--- a/fs/btrfs/discard.c
+++ b/fs/btrfs/discard.c
@@ -199,16 +199,15 @@ static struct btrfs_block_group *find_next_block_group(
static struct btrfs_block_group *peek_discard_list(
struct btrfs_discard_ctl *discard_ctl,
enum btrfs_discard_state *discard_state,
- int *discard_index)
+ int *discard_index, u64 now)
{
struct btrfs_block_group *block_group;
- const u64 now = ktime_get_ns();
spin_lock(&discard_ctl->lock);
again:
block_group = find_next_block_group(discard_ctl, now);
- if (block_group && now > block_group->discard_eligible_time) {
+ if (block_group && now >= block_group->discard_eligible_time) {
if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
block_group->used != 0) {
if (btrfs_is_block_group_data_only(block_group))
@@ -222,12 +221,11 @@ again:
block_group->discard_state = BTRFS_DISCARD_EXTENTS;
}
discard_ctl->block_group = block_group;
+ }
+ if (block_group) {
*discard_state = block_group->discard_state;
*discard_index = block_group->discard_index;
- } else {
- block_group = NULL;
}
-
spin_unlock(&discard_ctl->lock);
return block_group;
@@ -330,28 +328,15 @@ void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
btrfs_discard_schedule_work(discard_ctl, false);
}
-/**
- * btrfs_discard_schedule_work - responsible for scheduling the discard work
- * @discard_ctl: discard control
- * @override: override the current timer
- *
- * Discards are issued by a delayed workqueue item. @override is used to
- * update the current delay as the baseline delay interval is reevaluated on
- * transaction commit. This is also maxed with any other rate limit.
- */
-void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
- bool override)
+static void __btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
+ u64 now, bool override)
{
struct btrfs_block_group *block_group;
- const u64 now = ktime_get_ns();
-
- spin_lock(&discard_ctl->lock);
if (!btrfs_run_discard_work(discard_ctl))
- goto out;
-
+ return;
if (!override && delayed_work_pending(&discard_ctl->work))
- goto out;
+ return;
block_group = find_next_block_group(discard_ctl, now);
if (block_group) {
@@ -393,7 +378,24 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
mod_delayed_work(discard_ctl->discard_workers,
&discard_ctl->work, nsecs_to_jiffies(delay));
}
-out:
+}
+
+/*
+ * btrfs_discard_schedule_work - responsible for scheduling the discard work
+ * @discard_ctl: discard control
+ * @override: override the current timer
+ *
+ * Discards are issued by a delayed workqueue item. @override is used to
+ * update the current delay as the baseline delay interval is reevaluated on
+ * transaction commit. This is also maxed with any other rate limit.
+ */
+void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
+ bool override)
+{
+ const u64 now = ktime_get_ns();
+
+ spin_lock(&discard_ctl->lock);
+ __btrfs_discard_schedule_work(discard_ctl, now, override);
spin_unlock(&discard_ctl->lock);
}
@@ -438,13 +440,18 @@ static void btrfs_discard_workfn(struct work_struct *work)
int discard_index = 0;
u64 trimmed = 0;
u64 minlen = 0;
+ u64 now = ktime_get_ns();
discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);
block_group = peek_discard_list(discard_ctl, &discard_state,
- &discard_index);
+ &discard_index, now);
if (!block_group || !btrfs_run_discard_work(discard_ctl))
return;
+ if (now < block_group->discard_eligible_time) {
+ btrfs_discard_schedule_work(discard_ctl, false);
+ return;
+ }
/* Perform discarding */
minlen = discard_minlen[discard_index];
@@ -474,13 +481,6 @@ static void btrfs_discard_workfn(struct work_struct *work)
discard_ctl->discard_extent_bytes += trimmed;
}
- /*
- * Updated without locks as this is inside the workfn and nothing else
- * is reading the values
- */
- discard_ctl->prev_discard = trimmed;
- discard_ctl->prev_discard_time = ktime_get_ns();
-
/* Determine next steps for a block_group */
if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) {
if (discard_state == BTRFS_DISCARD_BITMAPS) {
@@ -496,11 +496,13 @@ static void btrfs_discard_workfn(struct work_struct *work)
}
}
+ now = ktime_get_ns();
spin_lock(&discard_ctl->lock);
+ discard_ctl->prev_discard = trimmed;
+ discard_ctl->prev_discard_time = now;
discard_ctl->block_group = NULL;
+ __btrfs_discard_schedule_work(discard_ctl, now, false);
spin_unlock(&discard_ctl->lock);
-
- btrfs_discard_schedule_work(discard_ctl, false);
}
/**
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 765deefda92b..6b35b7e88136 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1457,7 +1457,7 @@ void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
root = list_first_entry(&fs_info->allocated_roots,
struct btrfs_root, leak_list);
btrfs_err(fs_info, "leaked root %s refcount %d",
- btrfs_root_name(root->root_key.objectid, buf),
+ btrfs_root_name(&root->root_key, buf),
refcount_read(&root->refs));
while (refcount_read(&root->refs) > 1)
btrfs_put_root(root);
@@ -1729,7 +1729,7 @@ static int cleaner_kthread(void *arg)
*/
btrfs_delete_unused_bgs(fs_info);
sleep:
- clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
+ clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
if (kthread_should_park())
kthread_parkme();
if (kthread_should_stop())
@@ -2830,6 +2830,9 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
return -ENOMEM;
btrfs_init_delayed_root(fs_info->delayed_root);
+ if (sb_rdonly(sb))
+ set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
+
return btrfs_alloc_stripe_hash_table(fs_info);
}
@@ -2969,6 +2972,7 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
}
}
+ ret = btrfs_find_orphan_roots(fs_info);
out:
return ret;
}
@@ -3383,10 +3387,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
}
}
- ret = btrfs_find_orphan_roots(fs_info);
- if (ret)
- goto fail_qgroup;
-
fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
if (IS_ERR(fs_info->fs_root)) {
err = PTR_ERR(fs_info->fs_root);
@@ -4181,6 +4181,9 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
btrfs_stop_all_workers(fs_info);
+ /* We shouldn't have any transaction open at this point */
+ ASSERT(list_empty(&fs_info->trans_list));
+
clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
free_root_pointers(fs_info, true);
btrfs_free_fs_roots(fs_info);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 56ea380f5a17..30b1a630dc2f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -844,6 +844,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
want = extent_ref_type(parent, owner);
if (insert) {
extra_size = btrfs_extent_inline_ref_size(want);
+ path->search_for_extension = 1;
path->keep_locks = 1;
} else
extra_size = -1;
@@ -996,6 +997,7 @@ again:
out:
if (insert) {
path->keep_locks = 0;
+ path->search_for_extension = 0;
btrfs_unlock_up_safe(path, 1);
}
return err;
@@ -5547,7 +5549,15 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
goto out_free;
}
- trans = btrfs_start_transaction(tree_root, 0);
+ /*
+ * Use join to avoid potential EINTR from transaction
+ * start. See wait_reserve_ticket and the whole
+ * reservation callchain.
+ */
+ if (for_reloc)
+ trans = btrfs_join_transaction(tree_root);
+ else
+ trans = btrfs_start_transaction(tree_root, 0);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
goto out_free;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 6e3b72e63e42..c9cee458e001 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -676,9 +676,7 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
- struct inode *inode = tree->private_data;
-
- btrfs_panic(btrfs_sb(inode->i_sb), err,
+ btrfs_panic(tree->fs_info, err,
"locking error: extent tree was modified by another thread while locked");
}
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 1545c22ef280..6ccfc019ad90 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -1016,8 +1016,10 @@ again:
}
btrfs_release_path(path);
+ path->search_for_extension = 1;
ret = btrfs_search_slot(trans, root, &file_key, path,
csum_size, 1);
+ path->search_for_extension = 0;
if (ret < 0)
goto out;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8e23780acfae..a8e0a6b038d3 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9390,7 +9390,9 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
* some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
*/
-static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot)
+static int start_delalloc_inodes(struct btrfs_root *root,
+ struct writeback_control *wbc, bool snapshot,
+ bool in_reclaim_context)
{
struct btrfs_inode *binode;
struct inode *inode;
@@ -9398,6 +9400,7 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
struct list_head works;
struct list_head splice;
int ret = 0;
+ bool full_flush = wbc->nr_to_write == LONG_MAX;
INIT_LIST_HEAD(&works);
INIT_LIST_HEAD(&splice);
@@ -9411,6 +9414,11 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
list_move_tail(&binode->delalloc_inodes,
&root->delalloc_inodes);
+
+ if (in_reclaim_context &&
+ test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
+ continue;
+
inode = igrab(&binode->vfs_inode);
if (!inode) {
cond_resched_lock(&root->delalloc_lock);
@@ -9421,18 +9429,24 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
if (snapshot)
set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
&binode->runtime_flags);
- work = btrfs_alloc_delalloc_work(inode);
- if (!work) {
- iput(inode);
- ret = -ENOMEM;
- goto out;
- }
- list_add_tail(&work->list, &works);
- btrfs_queue_work(root->fs_info->flush_workers,
- &work->work);
- if (*nr != U64_MAX) {
- (*nr)--;
- if (*nr == 0)
+ if (full_flush) {
+ work = btrfs_alloc_delalloc_work(inode);
+ if (!work) {
+ iput(inode);
+ ret = -ENOMEM;
+ goto out;
+ }
+ list_add_tail(&work->list, &works);
+ btrfs_queue_work(root->fs_info->flush_workers,
+ &work->work);
+ } else {
+ ret = sync_inode(inode, wbc);
+ if (!ret &&
+ test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+ &BTRFS_I(inode)->runtime_flags))
+ ret = sync_inode(inode, wbc);
+ btrfs_add_delayed_iput(inode);
+ if (ret || wbc->nr_to_write <= 0)
goto out;
}
cond_resched();
@@ -9458,17 +9472,29 @@ out:
int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
{
+ struct writeback_control wbc = {
+ .nr_to_write = LONG_MAX,
+ .sync_mode = WB_SYNC_NONE,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ };
struct btrfs_fs_info *fs_info = root->fs_info;
- u64 nr = U64_MAX;
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
return -EROFS;
- return start_delalloc_inodes(root, &nr, true);
+ return start_delalloc_inodes(root, &wbc, true, false);
}
-int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
+int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
+ bool in_reclaim_context)
{
+ struct writeback_control wbc = {
+ .nr_to_write = (nr == U64_MAX) ? LONG_MAX : (unsigned long)nr,
+ .sync_mode = WB_SYNC_NONE,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ };
struct btrfs_root *root;
struct list_head splice;
int ret;
@@ -9482,6 +9508,13 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
spin_lock(&fs_info->delalloc_root_lock);
list_splice_init(&fs_info->delalloc_roots, &splice);
while (!list_empty(&splice) && nr) {
+ /*
+ * Reset nr_to_write here so we know that we're doing a full
+ * flush.
+ */
+ if (nr == U64_MAX)
+ wbc.nr_to_write = LONG_MAX;
+
root = list_first_entry(&splice, struct btrfs_root,
delalloc_root);
root = btrfs_grab_root(root);
@@ -9490,9 +9523,9 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
&fs_info->delalloc_roots);
spin_unlock(&fs_info->delalloc_root_lock);
- ret = start_delalloc_inodes(root, &nr, false);
+ ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
btrfs_put_root(root);
- if (ret < 0)
+ if (ret < 0 || wbc.nr_to_write <= 0)
goto out;
spin_lock(&fs_info->delalloc_root_lock);
}
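The inode.c changes above route partial flushes through a writeback_control whose nr_to_write acts as a shared page budget: sync_inode() decrements it, and both the per-root and the all-roots loops stop once it reaches zero. A minimal userspace sketch of that budgeted-loop shape (sizes and inode counts are made up, and plain arithmetic stands in for sync_inode()):

#include <stdio.h>

#define NUM_INODES 5

int main(void)
{
	long dirty_pages[NUM_INODES] = { 40, 10, 200, 5, 60 };
	long nr_to_write = 128;		/* budget, like wbc.nr_to_write */
	int i;

	for (i = 0; i < NUM_INODES && nr_to_write > 0; i++) {
		long written = dirty_pages[i] < nr_to_write ?
			       dirty_pages[i] : nr_to_write;

		nr_to_write -= written;	/* sync_inode() would consume this */
		printf("inode %d: wrote %ld pages, budget left %ld\n",
		       i, written, nr_to_write);
	}
	if (nr_to_write <= 0)
		puts("budget exhausted, stopping early");
	return 0;
}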
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 703212ff50a5..dde49a791f3e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4951,7 +4951,7 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_SYNC: {
int ret;
- ret = btrfs_start_delalloc_roots(fs_info, U64_MAX);
+ ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false);
if (ret)
return ret;
ret = btrfs_sync_fs(inode->i_sb, 1);
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index fe5e0026129d..aae1027bd76a 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -26,22 +26,22 @@ static const struct root_name_map root_map[] = {
{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" },
};
-const char *btrfs_root_name(u64 objectid, char *buf)
+const char *btrfs_root_name(const struct btrfs_key *key, char *buf)
{
int i;
- if (objectid == BTRFS_TREE_RELOC_OBJECTID) {
+ if (key->objectid == BTRFS_TREE_RELOC_OBJECTID) {
snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN,
- "TREE_RELOC offset=%llu", objectid);
+ "TREE_RELOC offset=%llu", key->offset);
return buf;
}
for (i = 0; i < ARRAY_SIZE(root_map); i++) {
- if (root_map[i].id == objectid)
+ if (root_map[i].id == key->objectid)
return root_map[i].name;
}
- snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN, "%llu", objectid);
+ snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN, "%llu", key->objectid);
return buf;
}
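btrfs_root_name() above keeps the same shape after the signature change: probe a small id-to-name table and fall back to formatting the raw value into a caller-supplied buffer. A self-contained sketch of that lookup-with-fallback pattern (the table contents and buffer size here are invented for illustration):

#include <stdio.h>

#define NAME_BUF_LEN 48

struct root_name_map { unsigned long long id; const char *name; };

static const struct root_name_map root_map[] = {
	{ 1, "ROOT_TREE" },
	{ 2, "EXTENT_TREE" },
	{ 5, "FS_TREE" },
};

static const char *root_name(unsigned long long id, char *buf)
{
	size_t i;

	for (i = 0; i < sizeof(root_map) / sizeof(root_map[0]); i++)
		if (root_map[i].id == id)
			return root_map[i].name;
	snprintf(buf, NAME_BUF_LEN, "%llu", id);	/* fallback */
	return buf;
}

int main(void)
{
	char buf[NAME_BUF_LEN];

	printf("%s\n", root_name(2, buf));
	printf("%s\n", root_name(42, buf));
	return 0;
}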
diff --git a/fs/btrfs/print-tree.h b/fs/btrfs/print-tree.h
index 78b99385a503..8c3e9319ec4e 100644
--- a/fs/btrfs/print-tree.h
+++ b/fs/btrfs/print-tree.h
@@ -11,6 +11,6 @@
void btrfs_print_leaf(struct extent_buffer *l);
void btrfs_print_tree(struct extent_buffer *c, bool follow);
-const char *btrfs_root_name(u64 objectid, char *buf);
+const char *btrfs_root_name(const struct btrfs_key *key, char *buf);
#endif
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index fe3046007f52..808370ada888 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -3190,6 +3190,12 @@ out:
return ret;
}
+static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
+{
+ return btrfs_fs_closing(fs_info) ||
+ test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+}
+
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
@@ -3198,6 +3204,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
struct btrfs_trans_handle *trans = NULL;
int err = -ENOMEM;
int ret = 0;
+ bool stopped = false;
path = btrfs_alloc_path();
if (!path)
@@ -3210,7 +3217,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
path->skip_locking = 1;
err = 0;
- while (!err && !btrfs_fs_closing(fs_info)) {
+ while (!err && !(stopped = rescan_should_stop(fs_info))) {
trans = btrfs_start_transaction(fs_info->fs_root, 0);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
@@ -3253,7 +3260,7 @@ out:
}
mutex_lock(&fs_info->qgroup_rescan_lock);
- if (!btrfs_fs_closing(fs_info))
+ if (!stopped)
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
if (trans) {
ret = update_qgroup_status_item(trans);
@@ -3272,7 +3279,7 @@ out:
btrfs_end_transaction(trans);
- if (btrfs_fs_closing(fs_info)) {
+ if (stopped) {
btrfs_info(fs_info, "qgroup scan paused");
} else if (err >= 0) {
btrfs_info(fs_info, "qgroup scan completed%s",
@@ -3531,16 +3538,6 @@ static int try_flush_qgroup(struct btrfs_root *root)
bool can_commit = true;
/*
- * We don't want to run flush again and again, so if there is a running
- * one, we won't try to start a new flush, but exit directly.
- */
- if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
- wait_event(root->qgroup_flush_wait,
- !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
- return 0;
- }
-
- /*
* If current process holds a transaction, we shouldn't flush, as we
* assume all space reservation happens before a transaction handle is
* held.
@@ -3554,6 +3551,26 @@ static int try_flush_qgroup(struct btrfs_root *root)
current->journal_info != BTRFS_SEND_TRANS_STUB)
can_commit = false;
+ /*
+ * We don't want to run flush again and again, so if there is a running
+ * one, we won't try to start a new flush, but exit directly.
+ */
+ if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
+ /*
+ * We are already holding a transaction, thus we can block other
+ * threads from flushing. So exit right now. This increases
+ * the chance of EDQUOT for heavy load and near limit cases.
+ * But we can argue that if we're already near limit, EDQUOT is
+ * unavoidable anyway.
+ */
+ if (!can_commit)
+ return 0;
+
+ wait_event(root->qgroup_flush_wait,
+ !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
+ return 0;
+ }
+
ret = btrfs_start_delalloc_snapshot(root);
if (ret < 0)
goto out;
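The qgroup rescan worker above now samples its stop condition once per iteration into a local `stopped`, so the status update and log message after the loop agree with the decision the loop actually made, rather than re-evaluating a condition that may have changed in the meantime. A small userspace sketch of that latching pattern (should_stop() and the counters are stand-ins, not kernel helpers):

#include <stdbool.h>
#include <stdio.h>

static int stop_after = 3;	/* pretend a remount/unmount request arrives */

static bool should_stop(void)
{
	return --stop_after <= 0;
}

int main(void)
{
	int err = 0;
	bool stopped = false;

	/* The real worker also breaks out on errors; err stays 0 here. */
	while (!err && !(stopped = should_stop()))
		puts("worker: doing one unit of rescan work");

	/* Cleanup reuses the latched value instead of re-checking. */
	if (stopped)
		puts("worker: scan paused by external request");
	else if (err)
		puts("worker: scan aborted with an error");
	else
		puts("worker: scan completed");
	return 0;
}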
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index ab80896315be..b03e7891394e 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -89,6 +89,19 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
if (ret)
goto out_unlock;
+ /*
+ * After dirtying the page our caller will need to start a transaction,
+ * and if we are low on metadata free space, that can cause flushing of
+ * delalloc for all inodes in order to get metadata space released.
+ * However we are holding the range locked for the whole duration of
+ * the clone/dedupe operation, so we may deadlock if that happens and no
+ * other task releases enough space. So mark this inode as not being
+ * possible to flush to avoid such deadlock. We will clear that flag
+ * when we finish cloning all extents, since a transaction is started
+ * after finding each extent to clone.
+ */
+ set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);
+
if (comp_type == BTRFS_COMPRESS_NONE) {
char *map;
@@ -549,6 +562,8 @@ process_slot:
out:
btrfs_free_path(path);
kvfree(buf);
+ clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);
+
return ret;
}
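The reflink change above brackets the whole clone/dedupe operation with a per-inode "do not flush me" flag so that a reclaim-context flush skips the inode whose range is locked. A loose userspace analogue of that set-flag / do-long-work / clear-flag pattern, using a C11 atomic instead of inode runtime flags (names and sleeps are illustrative only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool no_flush = false;

static void *flusher(void *arg)
{
	for (int i = 0; i < 3; i++) {
		if (atomic_load(&no_flush))
			puts("flusher: inode busy, skipping");
		else
			puts("flusher: flushing inode");
		sleep(1);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, flusher, NULL);

	atomic_store(&no_flush, true);	/* like setting NO_DELALLOC_FLUSH */
	sleep(2);			/* long clone/dedupe-like operation */
	atomic_store(&no_flush, false);	/* clear the flag when done */

	pthread_join(t, NULL);
	return 0;
}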
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 19b7db8b2117..df63ef64c5c0 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2975,11 +2975,16 @@ static int delete_v1_space_cache(struct extent_buffer *leaf,
return 0;
for (i = 0; i < btrfs_header_nritems(leaf); i++) {
+ u8 type;
+
btrfs_item_key_to_cpu(leaf, &key, i);
if (key.type != BTRFS_EXTENT_DATA_KEY)
continue;
ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_REG &&
+ type = btrfs_file_extent_type(leaf, ei);
+
+ if ((type == BTRFS_FILE_EXTENT_REG ||
+ type == BTRFS_FILE_EXTENT_PREALLOC) &&
btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
found = true;
space_cache_ino = key.objectid;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index d719a2755a40..78a35374d492 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -236,6 +236,7 @@ struct waiting_dir_move {
* after this directory is moved, we can try to rmdir the ino rmdir_ino.
*/
u64 rmdir_ino;
+ u64 rmdir_gen;
bool orphanized;
};
@@ -316,7 +317,7 @@ static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
-static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
+static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);
static int need_send_hole(struct send_ctx *sctx)
{
@@ -2299,7 +2300,7 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
fs_path_reset(name);
- if (is_waiting_for_rm(sctx, ino)) {
+ if (is_waiting_for_rm(sctx, ino, gen)) {
ret = gen_unique_name(sctx, ino, gen, name);
if (ret < 0)
goto out;
@@ -2858,8 +2859,8 @@ out:
return ret;
}
-static struct orphan_dir_info *
-add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
+static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
+ u64 dir_ino, u64 dir_gen)
{
struct rb_node **p = &sctx->orphan_dirs.rb_node;
struct rb_node *parent = NULL;
@@ -2868,20 +2869,23 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
while (*p) {
parent = *p;
entry = rb_entry(parent, struct orphan_dir_info, node);
- if (dir_ino < entry->ino) {
+ if (dir_ino < entry->ino)
p = &(*p)->rb_left;
- } else if (dir_ino > entry->ino) {
+ else if (dir_ino > entry->ino)
p = &(*p)->rb_right;
- } else {
+ else if (dir_gen < entry->gen)
+ p = &(*p)->rb_left;
+ else if (dir_gen > entry->gen)
+ p = &(*p)->rb_right;
+ else
return entry;
- }
}
odi = kmalloc(sizeof(*odi), GFP_KERNEL);
if (!odi)
return ERR_PTR(-ENOMEM);
odi->ino = dir_ino;
- odi->gen = 0;
+ odi->gen = dir_gen;
odi->last_dir_index_offset = 0;
rb_link_node(&odi->node, parent, p);
@@ -2889,8 +2893,8 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
return odi;
}
-static struct orphan_dir_info *
-get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
+static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
+ u64 dir_ino, u64 gen)
{
struct rb_node *n = sctx->orphan_dirs.rb_node;
struct orphan_dir_info *entry;
@@ -2901,15 +2905,19 @@ get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
n = n->rb_left;
else if (dir_ino > entry->ino)
n = n->rb_right;
+ else if (gen < entry->gen)
+ n = n->rb_left;
+ else if (gen > entry->gen)
+ n = n->rb_right;
else
return entry;
}
return NULL;
}
-static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
+static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
{
- struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
+ struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
return odi != NULL;
}
@@ -2954,7 +2962,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
key.type = BTRFS_DIR_INDEX_KEY;
key.offset = 0;
- odi = get_orphan_dir_info(sctx, dir);
+ odi = get_orphan_dir_info(sctx, dir, dir_gen);
if (odi)
key.offset = odi->last_dir_index_offset;
@@ -2985,7 +2993,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
dm = get_waiting_dir_move(sctx, loc.objectid);
if (dm) {
- odi = add_orphan_dir_info(sctx, dir);
+ odi = add_orphan_dir_info(sctx, dir, dir_gen);
if (IS_ERR(odi)) {
ret = PTR_ERR(odi);
goto out;
@@ -2993,12 +3001,13 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
odi->gen = dir_gen;
odi->last_dir_index_offset = found_key.offset;
dm->rmdir_ino = dir;
+ dm->rmdir_gen = dir_gen;
ret = 0;
goto out;
}
if (loc.objectid > send_progress) {
- odi = add_orphan_dir_info(sctx, dir);
+ odi = add_orphan_dir_info(sctx, dir, dir_gen);
if (IS_ERR(odi)) {
ret = PTR_ERR(odi);
goto out;
@@ -3038,6 +3047,7 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
return -ENOMEM;
dm->ino = ino;
dm->rmdir_ino = 0;
+ dm->rmdir_gen = 0;
dm->orphanized = orphanized;
while (*p) {
@@ -3183,7 +3193,7 @@ static int path_loop(struct send_ctx *sctx, struct fs_path *name,
while (ino != BTRFS_FIRST_FREE_OBJECTID) {
fs_path_reset(name);
- if (is_waiting_for_rm(sctx, ino))
+ if (is_waiting_for_rm(sctx, ino, gen))
break;
if (is_waiting_for_move(sctx, ino)) {
if (*ancestor_ino == 0)
@@ -3223,6 +3233,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
u64 parent_ino, parent_gen;
struct waiting_dir_move *dm = NULL;
u64 rmdir_ino = 0;
+ u64 rmdir_gen;
u64 ancestor;
bool is_orphan;
int ret;
@@ -3237,6 +3248,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
dm = get_waiting_dir_move(sctx, pm->ino);
ASSERT(dm);
rmdir_ino = dm->rmdir_ino;
+ rmdir_gen = dm->rmdir_gen;
is_orphan = dm->orphanized;
free_waiting_dir_move(sctx, dm);
@@ -3273,6 +3285,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
dm = get_waiting_dir_move(sctx, pm->ino);
ASSERT(dm);
dm->rmdir_ino = rmdir_ino;
+ dm->rmdir_gen = rmdir_gen;
}
goto out;
}
@@ -3291,7 +3304,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
struct orphan_dir_info *odi;
u64 gen;
- odi = get_orphan_dir_info(sctx, rmdir_ino);
+ odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
if (!odi) {
/* already deleted */
goto finish;
@@ -5499,6 +5512,21 @@ static int clone_range(struct send_ctx *sctx,
break;
offset += clone_len;
clone_root->offset += clone_len;
+
+ /*
+ * If we are cloning from the file we are currently processing,
+ * and using the send root as the clone root, we must stop once
+ * the current clone offset reaches the current eof of the file
+ * at the receiver, otherwise we would issue an invalid clone
+ * operation (source range going beyond eof) and cause the
+ * receiver to fail. So if we reach the current eof, bail out
+ * and fallback to a regular write.
+ */
+ if (clone_root->root == sctx->send_root &&
+ clone_root->ino == sctx->cur_ino &&
+ clone_root->offset >= sctx->cur_inode_next_write_offset)
+ break;
+
data_offset += clone_len;
next:
path->slots[0]++;
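The send.c changes above re-key the orphan-dir tree by the pair (inode number, generation) so that a reused inode number with a newer generation is treated as a distinct directory. The rb-tree walk compares the inode number first and breaks ties with the generation, which is the classic composite-key comparator. A self-contained sketch of that comparator (struct layout and values are illustrative, not the kernel rb-tree code):

#include <stdint.h>
#include <stdio.h>

struct orphan_dir_key {
	uint64_t ino;
	uint64_t gen;
};

/* Returns <0, 0 or >0 like a classic comparator. */
static int orphan_dir_cmp(const struct orphan_dir_key *a,
			  const struct orphan_dir_key *b)
{
	if (a->ino != b->ino)
		return a->ino < b->ino ? -1 : 1;
	if (a->gen != b->gen)
		return a->gen < b->gen ? -1 : 1;
	return 0;
}

int main(void)
{
	struct orphan_dir_key old_dir = { .ino = 257, .gen = 5 };
	struct orphan_dir_key new_dir = { .ino = 257, .gen = 7 };

	/* Same inode number, different generation: distinct entries. */
	printf("cmp = %d\n", orphan_dir_cmp(&old_dir, &new_dir));
	return 0;
}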
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 64099565ab8f..e8347461c8dd 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -532,7 +532,9 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
loops = 0;
while ((delalloc_bytes || dio_bytes) && loops < 3) {
- btrfs_start_delalloc_roots(fs_info, items);
+ u64 nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
+
+ btrfs_start_delalloc_roots(fs_info, nr_pages, true);
loops++;
if (wait_ordered && !trans) {
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 022f20810089..12d7d3be7cd4 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -175,7 +175,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
btrfs_discard_stop(fs_info);
/* btrfs handle error by forcing the filesystem readonly */
- sb->s_flags |= SB_RDONLY;
+ btrfs_set_sb_rdonly(sb);
btrfs_info(fs_info, "forced readonly");
/*
* Note that a running device replace operation is not canceled here
@@ -1953,7 +1953,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
/* avoid complains from lockdep et al. */
up(&fs_info->uuid_tree_rescan_sem);
- sb->s_flags |= SB_RDONLY;
+ btrfs_set_sb_rdonly(sb);
/*
* Setting SB_RDONLY will put the cleaner thread to
@@ -1964,10 +1964,42 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
*/
btrfs_delete_unused_bgs(fs_info);
+ /*
+ * The cleaner task could be already running before we set the
+ * flag BTRFS_FS_STATE_RO (and SB_RDONLY in the superblock).
+ * We must make sure that after we finish the remount, i.e. after
+ * we call btrfs_commit_super(), the cleaner can no longer start
+ * a transaction - either because it was dropping a dead root,
+ * running delayed iputs or deleting an unused block group (the
+ * cleaner picked a block group from the list of unused block
+ * groups before we were able to in the previous call to
+ * btrfs_delete_unused_bgs()).
+ */
+ wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING,
+ TASK_UNINTERRUPTIBLE);
+
+ /*
+ * We've set the superblock to RO mode, so we might have made
+ * the cleaner task sleep without running all pending delayed
+ * iputs. Go through all the delayed iputs here, so that if an
+ * unmount happens without remounting RW we don't end up at
+ * finishing close_ctree() with a non-empty list of delayed
+ * iputs.
+ */
+ btrfs_run_delayed_iputs(fs_info);
+
btrfs_dev_replace_suspend_for_unmount(fs_info);
btrfs_scrub_cancel(fs_info);
btrfs_pause_balance(fs_info);
+ /*
+ * Pause the qgroup rescan worker if it is running. We don't want
+ * it to be still running after we are in RO mode, as after that,
+ * by the time we unmount, it might have left a transaction open,
+ * so we would leak the transaction and/or crash.
+ */
+ btrfs_qgroup_wait_for_completion(fs_info, false);
+
ret = btrfs_commit_super(fs_info);
if (ret)
goto restore;
@@ -2006,7 +2038,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
if (ret)
goto restore;
- sb->s_flags &= ~SB_RDONLY;
+ btrfs_clear_sb_rdonly(sb);
set_bit(BTRFS_FS_OPEN, &fs_info->flags);
}
@@ -2028,6 +2060,8 @@ restore:
/* We've hit an error - don't reset SB_RDONLY */
if (sb_rdonly(sb))
old_flags |= SB_RDONLY;
+ if (!(old_flags & SB_RDONLY))
+ clear_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
sb->s_flags = old_flags;
fs_info->mount_opt = old_opts;
fs_info->compress_type = old_compress_type;
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index 8ca334d554af..6bd97bd4cb37 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -55,8 +55,14 @@ struct inode *btrfs_new_test_inode(void)
struct inode *inode;
inode = new_inode(test_mnt->mnt_sb);
- if (inode)
- inode_init_owner(inode, NULL, S_IFREG);
+ if (!inode)
+ return NULL;
+
+ inode->i_mode = S_IFREG;
+ BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
+ BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
+ BTRFS_I(inode)->location.offset = 0;
+ inode_init_owner(inode, NULL, S_IFREG);
return inode;
}
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 04022069761d..c9874b12d337 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -232,11 +232,6 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
return ret;
}
- inode->i_mode = S_IFREG;
- BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
- BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
- BTRFS_I(inode)->location.offset = 0;
-
fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_std_err(TEST_ALLOC_FS_INFO);
@@ -835,10 +830,6 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
return ret;
}
- BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
- BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
- BTRFS_I(inode)->location.offset = 0;
-
fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_std_err(TEST_ALLOC_FS_INFO);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 8e0f7a1029c6..6af7f2bf92de 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -2265,14 +2265,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
btrfs_free_log_root_tree(trans, fs_info);
/*
- * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
- * new delayed refs. Must handle them or qgroup can be wrong.
- */
- ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
- if (ret)
- goto unlock_tree_log;
-
- /*
* Since fs roots are all committed, we can get a quite accurate
* new_roots. So let's do quota accounting.
*/
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 028e733e42f3..582061c7b547 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -760,6 +760,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
u64 length;
+ u64 chunk_end;
u64 stripe_len;
u16 num_stripes;
u16 sub_stripes;
@@ -814,6 +815,12 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
"invalid chunk length, have %llu", length);
return -EUCLEAN;
}
+ if (unlikely(check_add_overflow(logical, length, &chunk_end))) {
+ chunk_err(leaf, chunk, logical,
+"invalid chunk logical start and length, have logical start %llu length %llu",
+ logical, length);
+ return -EUCLEAN;
+ }
if (unlikely(!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN)) {
chunk_err(leaf, chunk, logical,
"invalid chunk stripe length: %llu",
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index ee086fc56c30..0a6de859eb22 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2592,7 +2592,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
if (seeding_dev) {
- sb->s_flags &= ~SB_RDONLY;
+ btrfs_clear_sb_rdonly(sb);
ret = btrfs_prepare_sprout(fs_info);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -2728,7 +2728,7 @@ error_sysfs:
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
error_trans:
if (seeding_dev)
- sb->s_flags |= SB_RDONLY;
+ btrfs_set_sb_rdonly(sb);
if (trans)
btrfs_end_transaction(trans);
error_free_zone:
@@ -4317,6 +4317,8 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
btrfs_warn(fs_info,
"balance: cannot set exclusive op status, resume manually");
+ btrfs_release_path(path);
+
mutex_lock(&fs_info->balance_mutex);
BUG_ON(fs_info->balance_ctl);
spin_lock(&fs_info->balance_lock);
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 8bda092e60c5..e027c718ca01 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -413,7 +413,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
inode = d_backing_inode(object->backer);
ASSERT(S_ISREG(inode->i_mode));
- ASSERT(inode->i_mapping->a_ops->readpages);
/* calculate the shift required to use bmap */
shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
@@ -713,7 +712,6 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
inode = d_backing_inode(object->backer);
ASSERT(S_ISREG(inode->i_mode));
- ASSERT(inode->i_mapping->a_ops->readpages);
/* calculate the shift required to use bmap */
shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 98c15ff2e599..d87bd852ed96 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2475,6 +2475,22 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
return r;
}
+static void encode_timestamp_and_gids(void **p,
+ const struct ceph_mds_request *req)
+{
+ struct ceph_timespec ts;
+ int i;
+
+ ceph_encode_timespec64(&ts, &req->r_stamp);
+ ceph_encode_copy(p, &ts, sizeof(ts));
+
+ /* gid_list */
+ ceph_encode_32(p, req->r_cred->group_info->ngroups);
+ for (i = 0; i < req->r_cred->group_info->ngroups; i++)
+ ceph_encode_64(p, from_kgid(&init_user_ns,
+ req->r_cred->group_info->gid[i]));
+}
+
/*
* called under mdsc->mutex
*/
@@ -2491,7 +2507,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
u64 ino1 = 0, ino2 = 0;
int pathlen1 = 0, pathlen2 = 0;
bool freepath1 = false, freepath2 = false;
- int len, i;
+ int len;
u16 releases;
void *p, *end;
int ret;
@@ -2517,17 +2533,10 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
goto out_free1;
}
- if (legacy) {
- /* Old style */
- len = sizeof(*head);
- } else {
- /* New style: add gid_list and any later fields */
- len = sizeof(struct ceph_mds_request_head) + sizeof(u32) +
- (sizeof(u64) * req->r_cred->group_info->ngroups);
- }
-
+ len = legacy ? sizeof(*head) : sizeof(struct ceph_mds_request_head);
len += pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
sizeof(struct ceph_timespec);
+ len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);
/* calculate (max) length for cap releases */
len += sizeof(struct ceph_mds_request_release) *
@@ -2548,7 +2557,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
msg->hdr.tid = cpu_to_le64(req->r_tid);
/*
- * The old ceph_mds_request_header didn't contain a version field, and
+ * The old ceph_mds_request_head didn't contain a version field, and
* one was added when we moved the message version from 3->4.
*/
if (legacy) {
@@ -2609,20 +2618,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
head->num_releases = cpu_to_le16(releases);
- /* time stamp */
- {
- struct ceph_timespec ts;
- ceph_encode_timespec64(&ts, &req->r_stamp);
- ceph_encode_copy(&p, &ts, sizeof(ts));
- }
-
- /* gid list */
- if (!legacy) {
- ceph_encode_32(&p, req->r_cred->group_info->ngroups);
- for (i = 0; i < req->r_cred->group_info->ngroups; i++)
- ceph_encode_64(&p, from_kgid(&init_user_ns,
- req->r_cred->group_info->gid[i]));
- }
+ encode_timestamp_and_gids(&p, req);
if (WARN_ON_ONCE(p > end)) {
ceph_msg_put(msg);
@@ -2730,13 +2726,8 @@ static int __prepare_send_request(struct ceph_mds_session *session,
/* remove cap/dentry releases from message */
rhead->num_releases = 0;
- /* time stamp */
p = msg->front.iov_base + req->r_request_release_offset;
- {
- struct ceph_timespec ts;
- ceph_encode_timespec64(&ts, &req->r_stamp);
- ceph_encode_copy(&p, &ts, sizeof(ts));
- }
+ encode_timestamp_and_gids(&p, req);
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
@@ -5047,7 +5038,7 @@ bad:
return;
}
-static struct ceph_connection *con_get(struct ceph_connection *con)
+static struct ceph_connection *mds_get_con(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
@@ -5056,7 +5047,7 @@ static struct ceph_connection *con_get(struct ceph_connection *con)
return NULL;
}
-static void con_put(struct ceph_connection *con)
+static void mds_put_con(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
@@ -5067,7 +5058,7 @@ static void con_put(struct ceph_connection *con)
* if the client is unresponsive for long enough, the mds will kill
* the session entirely.
*/
-static void peer_reset(struct ceph_connection *con)
+static void mds_peer_reset(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5076,7 +5067,7 @@ static void peer_reset(struct ceph_connection *con)
send_mds_reconnect(mdsc, s);
}
-static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5134,8 +5125,8 @@ out:
* Note: returned pointer is the address of a structure that's
* managed separately. Caller must *not* attempt to free it.
*/
-static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
- int *proto, int force_new)
+static struct ceph_auth_handshake *
+mds_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5151,7 +5142,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
return auth;
}
-static int add_authorizer_challenge(struct ceph_connection *con,
+static int mds_add_authorizer_challenge(struct ceph_connection *con,
void *challenge_buf, int challenge_buf_len)
{
struct ceph_mds_session *s = con->private;
@@ -5162,7 +5153,7 @@ static int add_authorizer_challenge(struct ceph_connection *con,
challenge_buf, challenge_buf_len);
}
-static int verify_authorizer_reply(struct ceph_connection *con)
+static int mds_verify_authorizer_reply(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5174,7 +5165,7 @@ static int verify_authorizer_reply(struct ceph_connection *con)
NULL, NULL, NULL, NULL);
}
-static int invalidate_authorizer(struct ceph_connection *con)
+static int mds_invalidate_authorizer(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5297,15 +5288,15 @@ static int mds_check_message_signature(struct ceph_msg *msg)
}
static const struct ceph_connection_operations mds_con_ops = {
- .get = con_get,
- .put = con_put,
- .dispatch = dispatch,
- .get_authorizer = get_authorizer,
- .add_authorizer_challenge = add_authorizer_challenge,
- .verify_authorizer_reply = verify_authorizer_reply,
- .invalidate_authorizer = invalidate_authorizer,
- .peer_reset = peer_reset,
+ .get = mds_get_con,
+ .put = mds_put_con,
.alloc_msg = mds_alloc_msg,
+ .dispatch = mds_dispatch,
+ .peer_reset = mds_peer_reset,
+ .get_authorizer = mds_get_authorizer,
+ .add_authorizer_challenge = mds_add_authorizer_challenge,
+ .verify_authorizer_reply = mds_verify_authorizer_reply,
+ .invalidate_authorizer = mds_invalidate_authorizer,
.sign_message = mds_sign_message,
.check_message_signature = mds_check_message_signature,
.get_auth_request = mds_get_auth_request,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index b9df85506938..c8ef24bac94f 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2195,7 +2195,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
tcon->nohandlecache = ctx->nohandlecache;
else
- tcon->nohandlecache = 1;
+ tcon->nohandlecache = true;
tcon->nodelete = ctx->nodelete;
tcon->local_lease = ctx->local_lease;
INIT_LIST_HEAD(&tcon->pending_opens);
@@ -2628,7 +2628,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
} else if (ctx)
tcon->unix_ext = 1; /* Unix Extensions supported */
- if (tcon->unix_ext == 0) {
+ if (!tcon->unix_ext) {
cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
return;
}
@@ -3740,7 +3740,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
if (!ses->binding) {
ses->capabilities = server->capabilities;
- if (linuxExtEnabled == 0)
+ if (!linuxExtEnabled)
ses->capabilities &= (~server->vals->cap_unix);
if (ses->auth_key.response) {
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 6ad6ba5f6ebe..0fdb0de7ff86 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -1260,7 +1260,8 @@ void dfs_cache_del_vol(const char *fullpath)
vi = find_vol(fullpath);
spin_unlock(&vol_list_lock);
- kref_put(&vi->refcnt, vol_release);
+ if (!IS_ERR(vi))
+ kref_put(&vi->refcnt, vol_release);
}
/**
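The dfs_cache fix above guards the kref_put() because find_vol() can return an ERR_PTR-encoded error rather than a real object, and dereferencing such a value corrupts memory. A tiny userspace re-creation of the ERR_PTR()/IS_ERR() convention shows why the guard is needed (this is a simplified sketch, not the kernel's actual implementation):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)          { return (void *)error; }
static long  PTR_ERR(const void *ptr)     { return (long)ptr; }
static int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct vol_info { int refcnt; };

static struct vol_info *find_vol(int exists)
{
	static struct vol_info vi = { .refcnt = 1 };

	return exists ? &vi : ERR_PTR(-ENOENT);
}

int main(void)
{
	struct vol_info *vi = find_vol(0);

	if (!IS_ERR(vi))
		vi->refcnt--;		/* safe: only touch real objects */
	else
		printf("lookup failed: %ld\n", PTR_ERR(vi));
	return 0;
}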
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 0afccbbed2e6..076bcadc756a 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -303,8 +303,6 @@ do { \
int
smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx)
{
- int rc = 0;
-
memcpy(new_ctx, ctx, sizeof(*ctx));
new_ctx->prepath = NULL;
new_ctx->mount_options = NULL;
@@ -327,7 +325,7 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
DUP_CTX_STR(nodename);
DUP_CTX_STR(iocharset);
- return rc;
+ return 0;
}
static int
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 067eb44c7baa..794fc3b68b4f 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -3248,7 +3248,7 @@ close_exit:
free_rsp_buf(resp_buftype, rsp);
/* retry close in a worker thread if this one is interrupted */
- if (rc == -EINTR) {
+ if (is_interrupt_error(rc)) {
int tmp_rc;
tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 204a622b89ed..d85edf5d1429 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -424,7 +424,7 @@ struct smb2_rdma_transform_capabilities_context {
__le16 TransformCount;
__u16 Reserved1;
__u32 Reserved2;
- __le16 RDMATransformIds[1];
+ __le16 RDMATransformIds[];
} __packed;
/* Signing algorithms */
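The smb2pdu.h change above converts the one-element array into a C99 flexible array member, so the variable-length id list is allocated together with the header and sizeof() no longer counts a phantom first element. A self-contained sketch of that layout and its allocation (field names are simplified and not the real SMB2 wire format):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct rdma_transform_caps {
	uint16_t transform_count;
	uint16_t reserved1;
	uint32_t reserved2;
	uint16_t transform_ids[];	/* flexible array member */
};

int main(void)
{
	uint16_t count = 3;
	struct rdma_transform_caps *caps;

	caps = malloc(sizeof(*caps) + count * sizeof(caps->transform_ids[0]));
	if (!caps)
		return 1;

	caps->transform_count = count;
	for (uint16_t i = 0; i < count; i++)
		caps->transform_ids[i] = i + 1;

	printf("header is %zu bytes, ids start right after it\n",
	       sizeof(*caps));
	free(caps);
	return 0;
}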
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index e9abb41aa89b..95ef26b555b9 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -338,7 +338,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
if (ssocket == NULL)
return -EAGAIN;
- if (signal_pending(current)) {
+ if (fatal_signal_pending(current)) {
cifs_dbg(FYI, "signal pending before send request\n");
return -ERESTARTSYS;
}
@@ -429,7 +429,7 @@ unmask:
if (signal_pending(current) && (total_len != send_length)) {
cifs_dbg(FYI, "signal is pending after attempt to send\n");
- rc = -EINTR;
+ rc = -ERESTARTSYS;
}
/* uncork it */
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 1a0a827a7f34..be799040a415 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -372,20 +372,3 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
}
return err;
}
-
-int __ext4_handle_dirty_super(const char *where, unsigned int line,
- handle_t *handle, struct super_block *sb)
-{
- struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
- int err = 0;
-
- ext4_superblock_csum_set(sb);
- if (ext4_handle_valid(handle)) {
- err = jbd2_journal_dirty_metadata(handle, bh);
- if (err)
- ext4_journal_abort_handle(where, line, __func__,
- bh, handle, err);
- } else
- mark_buffer_dirty(bh);
- return err;
-}
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index a124c68b0c75..0d2fa423b7ad 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -244,9 +244,6 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
handle_t *handle, struct inode *inode,
struct buffer_head *bh);
-int __ext4_handle_dirty_super(const char *where, unsigned int line,
- handle_t *handle, struct super_block *sb);
-
#define ext4_journal_get_write_access(handle, bh) \
__ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh))
#define ext4_forget(handle, is_metadata, inode, bh, block_nr) \
@@ -257,8 +254,6 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
#define ext4_handle_dirty_metadata(handle, inode, bh) \
__ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \
(bh))
-#define ext4_handle_dirty_super(handle, sb) \
- __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
int type, int blocks, int rsv_blocks,
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index 4fcc21c25e79..0a14a7c87bf8 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -604,13 +604,13 @@ void ext4_fc_track_range(handle_t *handle, struct inode *inode, ext4_lblk_t star
trace_ext4_fc_track_range(inode, start, end, ret);
}
-static void ext4_fc_submit_bh(struct super_block *sb)
+static void ext4_fc_submit_bh(struct super_block *sb, bool is_tail)
{
int write_flags = REQ_SYNC;
struct buffer_head *bh = EXT4_SB(sb)->s_fc_bh;
- /* TODO: REQ_FUA | REQ_PREFLUSH is unnecessarily expensive. */
- if (test_opt(sb, BARRIER))
+ /* Add REQ_FUA | REQ_PREFLUSH only if this is the tail */
+ if (test_opt(sb, BARRIER) && is_tail)
write_flags |= REQ_FUA | REQ_PREFLUSH;
lock_buffer(bh);
set_buffer_dirty(bh);
@@ -684,7 +684,7 @@ static u8 *ext4_fc_reserve_space(struct super_block *sb, int len, u32 *crc)
*crc = ext4_chksum(sbi, *crc, tl, sizeof(*tl));
if (pad_len > 0)
ext4_fc_memzero(sb, tl + 1, pad_len, crc);
- ext4_fc_submit_bh(sb);
+ ext4_fc_submit_bh(sb, false);
ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh);
if (ret)
@@ -741,7 +741,7 @@ static int ext4_fc_write_tail(struct super_block *sb, u32 crc)
tail.fc_crc = cpu_to_le32(crc);
ext4_fc_memcpy(sb, dst, &tail.fc_crc, sizeof(tail.fc_crc), NULL);
- ext4_fc_submit_bh(sb);
+ ext4_fc_submit_bh(sb, true);
return 0;
}
@@ -1268,7 +1268,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full)
list_splice_init(&sbi->s_fc_dentry_q[FC_Q_STAGING],
&sbi->s_fc_dentry_q[FC_Q_MAIN]);
list_splice_init(&sbi->s_fc_q[FC_Q_STAGING],
- &sbi->s_fc_q[FC_Q_STAGING]);
+ &sbi->s_fc_q[FC_Q_MAIN]);
ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
@@ -1318,14 +1318,14 @@ static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl)
entry.len = darg.dname_len;
inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
- if (IS_ERR_OR_NULL(inode)) {
+ if (IS_ERR(inode)) {
jbd_debug(1, "Inode %d not found", darg.ino);
return 0;
}
old_parent = ext4_iget(sb, darg.parent_ino,
EXT4_IGET_NORMAL);
- if (IS_ERR_OR_NULL(old_parent)) {
+ if (IS_ERR(old_parent)) {
jbd_debug(1, "Dir with inode %d not found", darg.parent_ino);
iput(inode);
return 0;
@@ -1410,7 +1410,7 @@ static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl)
darg.parent_ino, darg.dname_len);
inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
- if (IS_ERR_OR_NULL(inode)) {
+ if (IS_ERR(inode)) {
jbd_debug(1, "Inode not found.");
return 0;
}
@@ -1466,10 +1466,11 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
trace_ext4_fc_replay(sb, tag, ino, 0, 0);
inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
- if (!IS_ERR_OR_NULL(inode)) {
+ if (!IS_ERR(inode)) {
ext4_ext_clear_bb(inode);
iput(inode);
}
+ inode = NULL;
ext4_fc_record_modified_inode(sb, ino);
@@ -1512,7 +1513,7 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
/* Given that we just wrote the inode on disk, this SHOULD succeed. */
inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
- if (IS_ERR_OR_NULL(inode)) {
+ if (IS_ERR(inode)) {
jbd_debug(1, "Inode not found.");
return -EFSCORRUPTED;
}
@@ -1564,7 +1565,7 @@ static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl)
goto out;
inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
- if (IS_ERR_OR_NULL(inode)) {
+ if (IS_ERR(inode)) {
jbd_debug(1, "inode %d not found.", darg.ino);
inode = NULL;
ret = -EINVAL;
@@ -1577,7 +1578,7 @@ static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl)
* dot and dot dot dirents are setup properly.
*/
dir = ext4_iget(sb, darg.parent_ino, EXT4_IGET_NORMAL);
- if (IS_ERR_OR_NULL(dir)) {
+ if (IS_ERR(dir)) {
jbd_debug(1, "Dir %d not found.", darg.ino);
goto out;
}
@@ -1653,7 +1654,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
inode = ext4_iget(sb, le32_to_cpu(fc_add_ex->fc_ino),
EXT4_IGET_NORMAL);
- if (IS_ERR_OR_NULL(inode)) {
+ if (IS_ERR(inode)) {
jbd_debug(1, "Inode not found.");
return 0;
}
@@ -1777,7 +1778,7 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
le32_to_cpu(lrange->fc_ino), cur, remaining);
inode = ext4_iget(sb, le32_to_cpu(lrange->fc_ino), EXT4_IGET_NORMAL);
- if (IS_ERR_OR_NULL(inode)) {
+ if (IS_ERR(inode)) {
jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange->fc_ino));
return 0;
}
@@ -1832,7 +1833,7 @@ static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
for (i = 0; i < state->fc_modified_inodes_used; i++) {
inode = ext4_iget(sb, state->fc_modified_inodes[i],
EXT4_IGET_NORMAL);
- if (IS_ERR_OR_NULL(inode)) {
+ if (IS_ERR(inode)) {
jbd_debug(1, "Inode %d not found.",
state->fc_modified_inodes[i]);
continue;
@@ -1849,7 +1850,7 @@ static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
if (ret > 0) {
path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
- if (!IS_ERR_OR_NULL(path)) {
+ if (!IS_ERR(path)) {
for (j = 0; j < path->p_depth; j++)
ext4_mb_mark_bb(inode->i_sb,
path[j].p_block, 1, 1);
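The fast-commit change above pays for the expensive flush (REQ_FUA | REQ_PREFLUSH) only on the tail block of a commit instead of on every block. A rough userspace analogue of "flush once, after the last write", using fdatasync() as the expensive step (file name and block sizes below are made up):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define NBLOCKS 4

int main(void)
{
	char block[4096];
	int fd = open("fc-demo.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0)
		return 1;
	memset(block, 0xab, sizeof(block));

	for (int i = 0; i < NBLOCKS; i++) {
		if (write(fd, block, sizeof(block)) != sizeof(block))
			return 1;
		if (i == NBLOCKS - 1)	/* is_tail: flush only once */
			fdatasync(fd);
	}
	close(fd);
	puts("wrote 4 blocks, flushed only after the tail");
	return 0;
}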
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 3ed8c048fb12..349b27f0dda0 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -809,9 +809,12 @@ static int ext4_sample_last_mounted(struct super_block *sb,
err = ext4_journal_get_write_access(handle, sbi->s_sbh);
if (err)
goto out_journal;
- strlcpy(sbi->s_es->s_last_mounted, cp,
+ lock_buffer(sbi->s_sbh);
+ strncpy(sbi->s_es->s_last_mounted, cp,
sizeof(sbi->s_es->s_last_mounted));
- ext4_handle_dirty_super(handle, sb);
+ ext4_superblock_csum_set(sb);
+ unlock_buffer(sbi->s_sbh);
+ ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
out_journal:
ext4_journal_stop(handle);
out:
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 27946882d4ce..c173c8405856 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5150,9 +5150,13 @@ static int ext4_do_update_inode(handle_t *handle,
err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
if (err)
goto out_brelse;
+ lock_buffer(EXT4_SB(sb)->s_sbh);
ext4_set_feature_large_file(sb);
+ ext4_superblock_csum_set(sb);
+ unlock_buffer(EXT4_SB(sb)->s_sbh);
ext4_handle_sync(handle);
- err = ext4_handle_dirty_super(handle, sb);
+ err = ext4_handle_dirty_metadata(handle, NULL,
+ EXT4_SB(sb)->s_sbh);
}
ext4_update_inode_fsync_trans(handle, inode, need_datasync);
out_brelse:
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 524e13432447..d9665d2f82db 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -1157,7 +1157,10 @@ resizefs_out:
err = ext4_journal_get_write_access(handle, sbi->s_sbh);
if (err)
goto pwsalt_err_journal;
+ lock_buffer(sbi->s_sbh);
generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
+ ext4_superblock_csum_set(sb);
+ unlock_buffer(sbi->s_sbh);
err = ext4_handle_dirty_metadata(handle, NULL,
sbi->s_sbh);
pwsalt_err_journal:
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index b17a082b7db1..cf652ba3e74d 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2976,14 +2976,17 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
(le32_to_cpu(sbi->s_es->s_inodes_count))) {
/* Insert this inode at the head of the on-disk orphan list */
NEXT_ORPHAN(inode) = le32_to_cpu(sbi->s_es->s_last_orphan);
+ lock_buffer(sbi->s_sbh);
sbi->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
+ ext4_superblock_csum_set(sb);
+ unlock_buffer(sbi->s_sbh);
dirty = true;
}
list_add(&EXT4_I(inode)->i_orphan, &sbi->s_orphan);
mutex_unlock(&sbi->s_orphan_lock);
if (dirty) {
- err = ext4_handle_dirty_super(handle, sb);
+ err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
if (!err)
err = rc;
@@ -3059,9 +3062,12 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
mutex_unlock(&sbi->s_orphan_lock);
goto out_brelse;
}
+ lock_buffer(sbi->s_sbh);
sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
+ ext4_superblock_csum_set(inode->i_sb);
+ unlock_buffer(sbi->s_sbh);
mutex_unlock(&sbi->s_orphan_lock);
- err = ext4_handle_dirty_super(handle, inode->i_sb);
+ err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
} else {
struct ext4_iloc iloc2;
struct inode *i_prev =
@@ -3593,9 +3599,6 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
return retval2;
}
}
- brelse(ent->bh);
- ent->bh = NULL;
-
return retval;
}
@@ -3794,6 +3797,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
+ old_file_type = old.de->file_type;
if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
ext4_handle_sync(handle);
@@ -3821,7 +3825,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
force_reread = (new.dir->i_ino == old.dir->i_ino &&
ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA));
- old_file_type = old.de->file_type;
if (whiteout) {
/*
* Do this before adding a new entry, so the old entry is sure
@@ -3919,15 +3922,19 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
retval = 0;
end_rename:
- brelse(old.dir_bh);
- brelse(old.bh);
- brelse(new.bh);
if (whiteout) {
- if (retval)
+ if (retval) {
+ ext4_setent(handle, &old,
+ old.inode->i_ino, old_file_type);
drop_nlink(whiteout);
+ }
unlock_new_inode(whiteout);
iput(whiteout);
+
}
+ brelse(old.dir_bh);
+ brelse(old.bh);
+ brelse(new.bh);
if (handle)
ext4_journal_stop(handle);
return retval;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 928700d57eb6..bd0d185654f3 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -899,8 +899,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
EXT4_SB(sb)->s_gdb_count++;
ext4_kvfree_array_rcu(o_group_desc);
+ lock_buffer(EXT4_SB(sb)->s_sbh);
le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
- err = ext4_handle_dirty_super(handle, sb);
+ ext4_superblock_csum_set(sb);
+ unlock_buffer(EXT4_SB(sb)->s_sbh);
+ err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
if (err)
ext4_std_error(sb, err);
return err;
@@ -1384,6 +1387,7 @@ static void ext4_update_super(struct super_block *sb,
reserved_blocks *= blocks_count;
do_div(reserved_blocks, 100);
+ lock_buffer(sbi->s_sbh);
ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
@@ -1421,6 +1425,8 @@ static void ext4_update_super(struct super_block *sb,
* active. */
ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
reserved_blocks);
+ ext4_superblock_csum_set(sb);
+ unlock_buffer(sbi->s_sbh);
/* Update the free space counts */
percpu_counter_add(&sbi->s_freeclusters_counter,
@@ -1515,7 +1521,7 @@ static int ext4_flex_group_add(struct super_block *sb,
ext4_update_super(sb, flex_gd);
- err = ext4_handle_dirty_super(handle, sb);
+ err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
exit_journal:
err2 = ext4_journal_stop(handle);
@@ -1717,15 +1723,18 @@ static int ext4_group_extend_no_check(struct super_block *sb,
goto errout;
}
+ lock_buffer(EXT4_SB(sb)->s_sbh);
ext4_blocks_count_set(es, o_blocks_count + add);
ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
+ ext4_superblock_csum_set(sb);
+ unlock_buffer(EXT4_SB(sb)->s_sbh);
ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
o_blocks_count + add);
/* We add the blocks to the bitmap and set the group need init bit */
err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
if (err)
goto errout;
- ext4_handle_dirty_super(handle, sb);
+ ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
o_blocks_count + add);
errout:
@@ -1874,12 +1883,15 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
if (err)
goto errout;
+ lock_buffer(sbi->s_sbh);
ext4_clear_feature_resize_inode(sb);
ext4_set_feature_meta_bg(sb);
sbi->s_es->s_first_meta_bg =
cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
+ ext4_superblock_csum_set(sb);
+ unlock_buffer(sbi->s_sbh);
- err = ext4_handle_dirty_super(handle, sb);
+ err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
if (err) {
ext4_std_error(sb, err);
goto errout;
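The ext4 hunks above all follow the same shape: take the buffer lock, update the in-memory superblock fields, recompute the superblock checksum, drop the lock, and only then mark the buffer dirty through the journal. A userspace sketch of that lock/update/recompute-checksum/unlock discipline (the mutex stands in for lock_buffer() and the XOR checksum is purely a toy):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct toy_super {
	pthread_mutex_t lock;	/* stands in for lock_buffer() */
	uint64_t blocks_count;
	uint64_t free_blocks;
	uint64_t csum;
};

static void recompute_csum(struct toy_super *sb)
{
	sb->csum = sb->blocks_count ^ sb->free_blocks;
}

static void grow_fs(struct toy_super *sb, uint64_t add)
{
	pthread_mutex_lock(&sb->lock);		/* lock_buffer() */
	sb->blocks_count += add;
	sb->free_blocks += add;
	recompute_csum(sb);			/* csum_set() before unlock */
	pthread_mutex_unlock(&sb->lock);	/* unlock_buffer() */
	/* ...then the buffer would be marked dirty in the journal. */
}

int main(void)
{
	struct toy_super sb = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.blocks_count = 1024,
		.free_blocks = 512,
	};

	recompute_csum(&sb);
	grow_fs(&sb, 256);
	printf("blocks=%llu free=%llu csum=%llu\n",
	       (unsigned long long)sb.blocks_count,
	       (unsigned long long)sb.free_blocks,
	       (unsigned long long)sb.csum);
	return 0;
}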
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 21121787c874..9a6f9875aa34 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -65,7 +65,8 @@ static struct ratelimit_state ext4_mount_msg_ratelimit;
static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
-static int ext4_commit_super(struct super_block *sb, int sync);
+static void ext4_update_super(struct super_block *sb);
+static int ext4_commit_super(struct super_block *sb);
static int ext4_mark_recovery_complete(struct super_block *sb,
struct ext4_super_block *es);
static int ext4_clear_journal_err(struct super_block *sb,
@@ -586,15 +587,12 @@ static int ext4_errno_to_code(int errno)
return EXT4_ERR_UNKNOWN;
}
-static void __save_error_info(struct super_block *sb, int error,
- __u32 ino, __u64 block,
- const char *func, unsigned int line)
+static void save_error_info(struct super_block *sb, int error,
+ __u32 ino, __u64 block,
+ const char *func, unsigned int line)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
- if (bdev_read_only(sb->s_bdev))
- return;
/* We default to EFSCORRUPTED error... */
if (error == 0)
error = EFSCORRUPTED;
@@ -618,15 +616,6 @@ static void __save_error_info(struct super_block *sb, int error,
spin_unlock(&sbi->s_error_lock);
}
-static void save_error_info(struct super_block *sb, int error,
- __u32 ino, __u64 block,
- const char *func, unsigned int line)
-{
- __save_error_info(sb, error, ino, block, func, line);
- if (!bdev_read_only(sb->s_bdev))
- ext4_commit_super(sb, 1);
-}
-
/* Deal with the reporting of failure conditions on a filesystem such as
* inconsistencies detected or read IO failures.
*
@@ -647,19 +636,40 @@ static void save_error_info(struct super_block *sb, int error,
* used to deal with unrecoverable failures such as journal IO errors or ENOMEM
* at a critical moment in log management.
*/
-static void ext4_handle_error(struct super_block *sb, bool force_ro)
+static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
+ __u32 ino, __u64 block,
+ const char *func, unsigned int line)
{
journal_t *journal = EXT4_SB(sb)->s_journal;
+ bool continue_fs = !force_ro && test_opt(sb, ERRORS_CONT);
+ EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
if (test_opt(sb, WARN_ON_ERROR))
WARN_ON_ONCE(1);
- if (sb_rdonly(sb) || (!force_ro && test_opt(sb, ERRORS_CONT)))
+ if (!continue_fs && !sb_rdonly(sb)) {
+ ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
+ if (journal)
+ jbd2_journal_abort(journal, -EIO);
+ }
+
+ if (!bdev_read_only(sb->s_bdev)) {
+ save_error_info(sb, error, ino, block, func, line);
+ /*
+ * In case the fs should keep running, we need to writeout
+ * superblock through the journal. Due to lock ordering
+ * constraints, it may not be safe to do it right here so we
+ * defer superblock flushing to a workqueue.
+ */
+ if (continue_fs)
+ schedule_work(&EXT4_SB(sb)->s_error_work);
+ else
+ ext4_commit_super(sb);
+ }
+
+ if (sb_rdonly(sb) || continue_fs)
return;
- ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
- if (journal)
- jbd2_journal_abort(journal, -EIO);
/*
* We force ERRORS_RO behavior when system is rebooting. Otherwise we
* could panic during 'reboot -f' as the underlying device got already
@@ -682,8 +692,39 @@ static void flush_stashed_error_work(struct work_struct *work)
{
struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
s_error_work);
+ journal_t *journal = sbi->s_journal;
+ handle_t *handle;
- ext4_commit_super(sbi->s_sb, 1);
+ /*
+ * If the journal is still running, we have to write out superblock
+ * through the journal to avoid collisions of other journalled sb
+ * updates.
+ *
+ * We use jbd2 functions directly here to avoid recursing back into
+ * ext4 error handling code during handling of previous errors.
+ */
+ if (!sb_rdonly(sbi->s_sb) && journal) {
+ handle = jbd2_journal_start(journal, 1);
+ if (IS_ERR(handle))
+ goto write_directly;
+ if (jbd2_journal_get_write_access(handle, sbi->s_sbh)) {
+ jbd2_journal_stop(handle);
+ goto write_directly;
+ }
+ ext4_update_super(sbi->s_sb);
+ if (jbd2_journal_dirty_metadata(handle, sbi->s_sbh)) {
+ jbd2_journal_stop(handle);
+ goto write_directly;
+ }
+ jbd2_journal_stop(handle);
+ return;
+ }
+write_directly:
+ /*
+ * Write through journal failed. Write sb directly to get error info
+ * out and hope for the best.
+ */
+ ext4_commit_super(sbi->s_sb);
}
#define ext4_error_ratelimit(sb) \
@@ -710,8 +751,7 @@ void __ext4_error(struct super_block *sb, const char *function,
sb->s_id, function, line, current->comm, &vaf);
va_end(args);
}
- save_error_info(sb, error, 0, block, function, line);
- ext4_handle_error(sb, force_ro);
+ ext4_handle_error(sb, force_ro, error, 0, block, function, line);
}
void __ext4_error_inode(struct inode *inode, const char *function,
@@ -741,9 +781,8 @@ void __ext4_error_inode(struct inode *inode, const char *function,
current->comm, &vaf);
va_end(args);
}
- save_error_info(inode->i_sb, error, inode->i_ino, block,
- function, line);
- ext4_handle_error(inode->i_sb, false);
+ ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block,
+ function, line);
}
void __ext4_error_file(struct file *file, const char *function,
@@ -780,9 +819,8 @@ void __ext4_error_file(struct file *file, const char *function,
current->comm, path, &vaf);
va_end(args);
}
- save_error_info(inode->i_sb, EFSCORRUPTED, inode->i_ino, block,
- function, line);
- ext4_handle_error(inode->i_sb, false);
+ ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block,
+ function, line);
}
const char *ext4_decode_error(struct super_block *sb, int errno,
@@ -849,8 +887,7 @@ void __ext4_std_error(struct super_block *sb, const char *function,
sb->s_id, function, line, errstr);
}
- save_error_info(sb, -errno, 0, 0, function, line);
- ext4_handle_error(sb, false);
+ ext4_handle_error(sb, false, -errno, 0, 0, function, line);
}
void __ext4_msg(struct super_block *sb,
@@ -944,13 +981,16 @@ __acquires(bitlock)
if (test_opt(sb, ERRORS_CONT)) {
if (test_opt(sb, WARN_ON_ERROR))
WARN_ON_ONCE(1);
- __save_error_info(sb, EFSCORRUPTED, ino, block, function, line);
- schedule_work(&EXT4_SB(sb)->s_error_work);
+ EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+ if (!bdev_read_only(sb->s_bdev)) {
+ save_error_info(sb, EFSCORRUPTED, ino, block, function,
+ line);
+ schedule_work(&EXT4_SB(sb)->s_error_work);
+ }
return;
}
ext4_unlock_group(sb, grp);
- save_error_info(sb, EFSCORRUPTED, ino, block, function, line);
- ext4_handle_error(sb, false);
+ ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line);
/*
* We only get here in the ERRORS_RO case; relocking the group
* may be dangerous, but nothing bad will happen since the
@@ -1152,7 +1192,7 @@ static void ext4_put_super(struct super_block *sb)
es->s_state = cpu_to_le16(sbi->s_mount_state);
}
if (!sb_rdonly(sb))
- ext4_commit_super(sb, 1);
+ ext4_commit_super(sb);
rcu_read_lock();
group_desc = rcu_dereference(sbi->s_group_desc);
@@ -2642,7 +2682,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
if (sbi->s_journal)
ext4_set_feature_journal_needs_recovery(sb);
- err = ext4_commit_super(sb, 1);
+ err = ext4_commit_super(sb);
done:
if (test_opt(sb, DEBUG))
printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
@@ -4868,7 +4908,7 @@ no_journal:
if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
!ext4_has_feature_encrypt(sb)) {
ext4_set_feature_encrypt(sb);
- ext4_commit_super(sb, 1);
+ ext4_commit_super(sb);
}
/*
@@ -5418,7 +5458,7 @@ static int ext4_load_journal(struct super_block *sb,
es->s_journal_dev = cpu_to_le32(journal_devnum);
/* Make sure we flush the recovery flag to disk. */
- ext4_commit_super(sb, 1);
+ ext4_commit_super(sb);
}
return 0;
@@ -5428,16 +5468,14 @@ err_out:
return err;
}
-static int ext4_commit_super(struct super_block *sb, int sync)
+/* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */
+static void ext4_update_super(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
- struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
- int error = 0;
-
- if (!sbh || block_device_ejected(sb))
- return error;
+ struct ext4_super_block *es = sbi->s_es;
+ struct buffer_head *sbh = sbi->s_sbh;
+ lock_buffer(sbh);
/*
* If the file system is mounted read-only, don't update the
* superblock write time. This avoids updating the superblock
@@ -5451,17 +5489,17 @@ static int ext4_commit_super(struct super_block *sb, int sync)
if (!(sb->s_flags & SB_RDONLY))
ext4_update_tstamp(es, s_wtime);
es->s_kbytes_written =
- cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
+ cpu_to_le64(sbi->s_kbytes_written +
((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
- EXT4_SB(sb)->s_sectors_written_start) >> 1));
- if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
+ sbi->s_sectors_written_start) >> 1));
+ if (percpu_counter_initialized(&sbi->s_freeclusters_counter))
ext4_free_blocks_count_set(es,
- EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
- &EXT4_SB(sb)->s_freeclusters_counter)));
- if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
+ EXT4_C2B(sbi, percpu_counter_sum_positive(
+ &sbi->s_freeclusters_counter)));
+ if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
es->s_free_inodes_count =
cpu_to_le32(percpu_counter_sum_positive(
- &EXT4_SB(sb)->s_freeinodes_counter));
+ &sbi->s_freeinodes_counter));
/* Copy error information to the on-disk superblock */
spin_lock(&sbi->s_error_lock);
if (sbi->s_add_error_count > 0) {
@@ -5502,10 +5540,20 @@ static int ext4_commit_super(struct super_block *sb, int sync)
}
spin_unlock(&sbi->s_error_lock);
- BUFFER_TRACE(sbh, "marking dirty");
ext4_superblock_csum_set(sb);
- if (sync)
- lock_buffer(sbh);
+ unlock_buffer(sbh);
+}
+
+static int ext4_commit_super(struct super_block *sb)
+{
+ struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
+ int error = 0;
+
+ if (!sbh || block_device_ejected(sb))
+ return error;
+
+ ext4_update_super(sb);
+
if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
/*
* Oh, dear. A previous attempt to write the
@@ -5520,17 +5568,15 @@ static int ext4_commit_super(struct super_block *sb, int sync)
clear_buffer_write_io_error(sbh);
set_buffer_uptodate(sbh);
}
+ BUFFER_TRACE(sbh, "marking dirty");
mark_buffer_dirty(sbh);
- if (sync) {
- unlock_buffer(sbh);
- error = __sync_dirty_buffer(sbh,
- REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
- if (buffer_write_io_error(sbh)) {
- ext4_msg(sb, KERN_ERR, "I/O error while writing "
- "superblock");
- clear_buffer_write_io_error(sbh);
- set_buffer_uptodate(sbh);
- }
+ error = __sync_dirty_buffer(sbh,
+ REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
+ if (buffer_write_io_error(sbh)) {
+ ext4_msg(sb, KERN_ERR, "I/O error while writing "
+ "superblock");
+ clear_buffer_write_io_error(sbh);
+ set_buffer_uptodate(sbh);
}
return error;
}
@@ -5561,7 +5607,7 @@ static int ext4_mark_recovery_complete(struct super_block *sb,
if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
ext4_clear_feature_journal_needs_recovery(sb);
- ext4_commit_super(sb, 1);
+ ext4_commit_super(sb);
}
out:
jbd2_journal_unlock_updates(journal);
@@ -5603,7 +5649,7 @@ static int ext4_clear_journal_err(struct super_block *sb,
EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
- ext4_commit_super(sb, 1);
+ ext4_commit_super(sb);
jbd2_journal_clear_err(journal);
jbd2_journal_update_sb_errno(journal);
@@ -5705,7 +5751,7 @@ static int ext4_freeze(struct super_block *sb)
ext4_clear_feature_journal_needs_recovery(sb);
}
- error = ext4_commit_super(sb, 1);
+ error = ext4_commit_super(sb);
out:
if (journal)
/* we rely on upper layer to stop further updates */
@@ -5727,7 +5773,7 @@ static int ext4_unfreeze(struct super_block *sb)
ext4_set_feature_journal_needs_recovery(sb);
}
- ext4_commit_super(sb, 1);
+ ext4_commit_super(sb);
return 0;
}
@@ -5987,7 +6033,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
- err = ext4_commit_super(sb, 1);
+ err = ext4_commit_super(sb);
if (err)
goto restore_opts;
}
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 4e3b1f8c2e81..372208500f4e 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -792,8 +792,11 @@ static void ext4_xattr_update_super_block(handle_t *handle,
BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
+ lock_buffer(EXT4_SB(sb)->s_sbh);
ext4_set_feature_xattr(sb);
- ext4_handle_dirty_super(handle, sb);
+ ext4_superblock_csum_set(sb);
+ unlock_buffer(EXT4_SB(sb)->s_sbh);
+ ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
}
}
diff --git a/fs/file.c b/fs/file.c
index c0b60961c672..dab120b71e44 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -21,7 +21,6 @@
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>
-#include <linux/io_uring.h>
unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
@@ -428,7 +427,6 @@ void exit_files(struct task_struct *tsk)
struct files_struct * files = tsk->files;
if (files) {
- io_uring_files_cancel(files);
task_lock(tsk);
tsk->files = NULL;
task_unlock(tsk);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index acfb55834af2..c41cb887eb7d 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1474,21 +1474,25 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
}
/*
- * Some filesystems may redirty the inode during the writeback
- * due to delalloc, clear dirty metadata flags right before
- * write_inode()
+ * If the inode has dirty timestamps and we need to write them, call
+ * mark_inode_dirty_sync() to notify the filesystem about it and to
+ * change I_DIRTY_TIME into I_DIRTY_SYNC.
*/
- spin_lock(&inode->i_lock);
-
- dirty = inode->i_state & I_DIRTY;
if ((inode->i_state & I_DIRTY_TIME) &&
- ((dirty & I_DIRTY_INODE) ||
- wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
+ (wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
time_after(jiffies, inode->dirtied_time_when +
dirtytime_expire_interval * HZ))) {
- dirty |= I_DIRTY_TIME;
trace_writeback_lazytime(inode);
+ mark_inode_dirty_sync(inode);
}
+
+ /*
+ * Some filesystems may redirty the inode during the writeback
+ * due to delalloc, clear dirty metadata flags right before
+ * write_inode()
+ */
+ spin_lock(&inode->i_lock);
+ dirty = inode->i_state & I_DIRTY;
inode->i_state &= ~dirty;
/*
@@ -1509,8 +1513,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
spin_unlock(&inode->i_lock);
- if (dirty & I_DIRTY_TIME)
- mark_inode_dirty_sync(inode);
/* Don't write the inode if only I_DIRTY_PAGES was set */
if (dirty & ~I_DIRTY_PAGES) {
int err = write_inode(inode, wbc);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7e35283fc0b1..c07913ec0cca 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -262,6 +262,7 @@ struct io_ring_ctx {
unsigned int drain_next: 1;
unsigned int eventfd_async: 1;
unsigned int restricted: 1;
+ unsigned int sqo_dead: 1;
/*
* Ring buffer of indices into array of io_uring_sqe, which is
@@ -353,6 +354,7 @@ struct io_ring_ctx {
unsigned cq_entries;
unsigned cq_mask;
atomic_t cq_timeouts;
+ unsigned cq_last_tm_flush;
unsigned long cq_check_overflow;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
@@ -992,6 +994,13 @@ enum io_mem_account {
ACCT_PINNED,
};
+static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+ struct task_struct *task);
+
+static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
+static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
+ struct io_ring_ctx *ctx);
+
static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
struct io_comp_state *cs);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
@@ -1016,6 +1025,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
const struct iovec *fast_iov,
struct iov_iter *iter, bool force);
+static void io_req_drop_files(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
@@ -1039,8 +1049,7 @@ EXPORT_SYMBOL(io_uring_get_socket);
static inline void io_clean_op(struct io_kiocb *req)
{
- if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
- REQ_F_INFLIGHT))
+ if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
__io_clean_op(req);
}
@@ -1066,8 +1075,11 @@ static bool io_match_task(struct io_kiocb *head,
return true;
io_for_each_link(req, head) {
- if ((req->flags & REQ_F_WORK_INITIALIZED) &&
- (req->work.flags & IO_WQ_WORK_FILES) &&
+ if (!(req->flags & REQ_F_WORK_INITIALIZED))
+ continue;
+ if (req->file && req->file->f_op == &io_uring_fops)
+ return true;
+ if ((req->work.flags & IO_WQ_WORK_FILES) &&
req->work.identity->files == files)
return true;
}
@@ -1098,6 +1110,9 @@ static void io_sq_thread_drop_mm_files(void)
static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
{
+ if (current->flags & PF_EXITING)
+ return -EFAULT;
+
if (!current->files) {
struct files_struct *files;
struct nsproxy *nsproxy;
@@ -1125,6 +1140,8 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
{
struct mm_struct *mm;
+ if (current->flags & PF_EXITING)
+ return -EFAULT;
if (current->mm)
return 0;
@@ -1338,11 +1355,6 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
/* order cqe stores with ring update */
smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
-
- if (wq_has_sleeper(&ctx->cq_wait)) {
- wake_up_interruptible(&ctx->cq_wait);
- kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
- }
}
static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
@@ -1385,6 +1397,8 @@ static void io_req_clean_work(struct io_kiocb *req)
free_fs_struct(fs);
req->work.flags &= ~IO_WQ_WORK_FS;
}
+ if (req->flags & REQ_F_INFLIGHT)
+ io_req_drop_files(req);
io_put_identity(req->task->io_uring, req);
}
@@ -1494,13 +1508,23 @@ static bool io_grab_identity(struct io_kiocb *req)
return false;
atomic_inc(&id->files->count);
get_nsproxy(id->nsproxy);
- req->flags |= REQ_F_INFLIGHT;
- spin_lock_irq(&ctx->inflight_lock);
- list_add(&req->inflight_entry, &ctx->inflight_list);
- spin_unlock_irq(&ctx->inflight_lock);
+ if (!(req->flags & REQ_F_INFLIGHT)) {
+ req->flags |= REQ_F_INFLIGHT;
+
+ spin_lock_irq(&ctx->inflight_lock);
+ list_add(&req->inflight_entry, &ctx->inflight_list);
+ spin_unlock_irq(&ctx->inflight_lock);
+ }
req->work.flags |= IO_WQ_WORK_FILES;
}
+ if (!(req->work.flags & IO_WQ_WORK_MM) &&
+ (def->work_flags & IO_WQ_WORK_MM)) {
+ if (id->mm != current->mm)
+ return false;
+ mmgrab(id->mm);
+ req->work.flags |= IO_WQ_WORK_MM;
+ }
return true;
}
@@ -1509,10 +1533,8 @@ static void io_prep_async_work(struct io_kiocb *req)
{
const struct io_op_def *def = &io_op_defs[req->opcode];
struct io_ring_ctx *ctx = req->ctx;
- struct io_identity *id;
io_req_init_async(req);
- id = req->work.identity;
if (req->flags & REQ_F_FORCE_ASYNC)
req->work.flags |= IO_WQ_WORK_CONCURRENT;
@@ -1525,13 +1547,6 @@ static void io_prep_async_work(struct io_kiocb *req)
req->work.flags |= IO_WQ_WORK_UNBOUND;
}
- /* ->mm can never change on us */
- if (!(req->work.flags & IO_WQ_WORK_MM) &&
- (def->work_flags & IO_WQ_WORK_MM)) {
- mmgrab(id->mm);
- req->work.flags |= IO_WQ_WORK_MM;
- }
-
/* if we fail grabbing identity, we must COW, regrab, and retry */
if (io_grab_identity(req))
return;
@@ -1633,19 +1648,38 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
static void io_flush_timeouts(struct io_ring_ctx *ctx)
{
- while (!list_empty(&ctx->timeout_list)) {
+ u32 seq;
+
+ if (list_empty(&ctx->timeout_list))
+ return;
+
+ seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+
+ do {
+ u32 events_needed, events_got;
struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
struct io_kiocb, timeout.list);
if (io_is_timeout_noseq(req))
break;
- if (req->timeout.target_seq != ctx->cached_cq_tail
- - atomic_read(&ctx->cq_timeouts))
+
+ /*
+ * Since seq can easily wrap around over time, subtract
+ * the last seq at which timeouts were flushed before comparing.
+ * Assuming not more than 2^31-1 events have happened since,
+ * these subtractions won't have wrapped, so we can check if
+ * target is in [last_seq, current_seq] by comparing the two.
+ */
+ events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
+ events_got = seq - ctx->cq_last_tm_flush;
+ if (events_got < events_needed)
break;
list_del_init(&req->timeout.list);
io_kill_timeout(req);
- }
+ } while (!list_empty(&ctx->timeout_list));
+
+ ctx->cq_last_tm_flush = seq;
}
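The wrap-safe comparison used in io_flush_timeouts() above is worth seeing in isolation. The following standalone C sketch (not part of the patch; the names and sequence values are invented) shows why subtracting the last flush point before comparing keeps the "has the target sequence been reached?" test correct across u32 wrap-around, under the same assumption the comment makes (fewer than 2^31-1 events since the last flush):

#include <stdint.h>
#include <stdio.h>

/* Returns 1 if target lies within (last, cur] even when the u32 counters wrap. */
static int seq_reached(uint32_t last, uint32_t cur, uint32_t target)
{
	uint32_t needed = target - last;	/* events required since the last flush */
	uint32_t got    = cur - last;		/* events that actually happened        */

	return got >= needed;
}

int main(void)
{
	/* Counters close to UINT32_MAX: cur has already wrapped past zero. */
	uint32_t last = 0xfffffff0u, cur = 0x00000010u;

	printf("%d\n", seq_reached(last, cur, 0xfffffffau));	/* 1: target before the wrap */
	printf("%d\n", seq_reached(last, cur, 0x00000005u));	/* 1: target after the wrap  */
	printf("%d\n", seq_reached(last, cur, 0x00000020u));	/* 0: not reached yet        */
	return 0;
}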
static void io_commit_cqring(struct io_ring_ctx *ctx)
@@ -1700,18 +1734,42 @@ static inline unsigned __io_cqring_events(struct io_ring_ctx *ctx)
static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
+ /* see waitqueue_active() comment */
+ smp_mb();
+
if (waitqueue_active(&ctx->wait))
wake_up(&ctx->wait);
if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
wake_up(&ctx->sq_data->wait);
if (io_should_trigger_evfd(ctx))
eventfd_signal(ctx->cq_ev_fd, 1);
+ if (waitqueue_active(&ctx->cq_wait)) {
+ wake_up_interruptible(&ctx->cq_wait);
+ kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+ }
+}
+
+static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
+{
+ /* see waitqueue_active() comment */
+ smp_mb();
+
+ if (ctx->flags & IORING_SETUP_SQPOLL) {
+ if (waitqueue_active(&ctx->wait))
+ wake_up(&ctx->wait);
+ }
+ if (io_should_trigger_evfd(ctx))
+ eventfd_signal(ctx->cq_ev_fd, 1);
+ if (waitqueue_active(&ctx->cq_wait)) {
+ wake_up_interruptible(&ctx->cq_wait);
+ kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+ }
}
/* Returns true if there are no backlogged entries after the flush */
-static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
- struct task_struct *tsk,
- struct files_struct *files)
+static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+ struct task_struct *tsk,
+ struct files_struct *files)
{
struct io_rings *rings = ctx->rings;
struct io_kiocb *req, *tmp;
@@ -1764,6 +1822,20 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
return all_flushed;
}
+static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+ struct task_struct *tsk,
+ struct files_struct *files)
+{
+ if (test_bit(0, &ctx->cq_check_overflow)) {
+ /* iopoll syncs against uring_lock, not completion_lock */
+ if (ctx->flags & IORING_SETUP_IOPOLL)
+ mutex_lock(&ctx->uring_lock);
+ __io_cqring_overflow_flush(ctx, force, tsk, files);
+ if (ctx->flags & IORING_SETUP_IOPOLL)
+ mutex_unlock(&ctx->uring_lock);
+ }
+}
+
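The wrapper above takes uring_lock only for IOPOLL rings, since in that mode the overflow list is synchronised by uring_lock rather than completion_lock. A minimal user-space sketch of the same shape, conditionally taking the heavier lock around the real worker, is shown below; the names are invented and this is not the io_uring API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ring {
	bool iopoll;			/* stands in for IORING_SETUP_IOPOLL */
	pthread_mutex_t big_lock;	/* stands in for uring_lock          */
	int overflowed;
};

/* inner worker: in iopoll mode the caller must hold big_lock */
static void __flush_overflow(struct ring *r)
{
	r->overflowed = 0;
}

/* wrapper: only the iopoll configuration needs the heavier lock */
static void flush_overflow(struct ring *r)
{
	if (!r->overflowed)
		return;
	if (r->iopoll)
		pthread_mutex_lock(&r->big_lock);
	__flush_overflow(r);
	if (r->iopoll)
		pthread_mutex_unlock(&r->big_lock);
}

int main(void)
{
	struct ring r = { .iopoll = true, .overflowed = 1 };

	pthread_mutex_init(&r.big_lock, NULL);
	flush_overflow(&r);
	printf("overflowed=%d\n", r.overflowed);
	pthread_mutex_destroy(&r.big_lock);
	return 0;
}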
static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -2123,14 +2195,14 @@ static void __io_req_task_submit(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- if (!__io_sq_thread_acquire_mm(ctx) &&
- !__io_sq_thread_acquire_files(ctx)) {
- mutex_lock(&ctx->uring_lock);
+ mutex_lock(&ctx->uring_lock);
+ if (!ctx->sqo_dead &&
+ !__io_sq_thread_acquire_mm(ctx) &&
+ !__io_sq_thread_acquire_files(ctx))
__io_queue_sqe(req, NULL);
- mutex_unlock(&ctx->uring_lock);
- } else {
+ else
__io_req_task_cancel(req, -EFAULT);
- }
+ mutex_unlock(&ctx->uring_lock);
}
static void io_req_task_submit(struct callback_head *cb)
@@ -2206,6 +2278,8 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
struct io_uring_task *tctx = rb->task->io_uring;
percpu_counter_sub(&tctx->inflight, rb->task_refs);
+ if (atomic_read(&tctx->in_idle))
+ wake_up(&tctx->wait);
put_task_struct_many(rb->task, rb->task_refs);
rb->task = NULL;
}
@@ -2224,6 +2298,8 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
struct io_uring_task *tctx = rb->task->io_uring;
percpu_counter_sub(&tctx->inflight, rb->task_refs);
+ if (atomic_read(&tctx->in_idle))
+ wake_up(&tctx->wait);
put_task_struct_many(rb->task, rb->task_refs);
}
rb->task = req->task;
@@ -2309,20 +2385,8 @@ static void io_double_put_req(struct io_kiocb *req)
io_free_req(req);
}
-static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
+static unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
- if (test_bit(0, &ctx->cq_check_overflow)) {
- /*
- * noflush == true is from the waitqueue handler, just ensure
- * we wake up the task, and the next invocation will flush the
- * entries. We cannot safely to it from here.
- */
- if (noflush)
- return -1U;
-
- io_cqring_overflow_flush(ctx, false, NULL, NULL);
- }
-
/* See comment at the top of this file */
smp_rmb();
return __io_cqring_events(ctx);
@@ -2420,8 +2484,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
}
io_commit_cqring(ctx);
- if (ctx->flags & IORING_SETUP_SQPOLL)
- io_cqring_ev_posted(ctx);
+ io_cqring_ev_posted_iopoll(ctx);
io_req_free_batch_finish(ctx, &rb);
if (!list_empty(&again))
@@ -2547,7 +2610,9 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
* If we do, we can potentially be spinning for commands that
* already triggered a CQE (eg in error).
*/
- if (io_cqring_events(ctx, false))
+ if (test_bit(0, &ctx->cq_check_overflow))
+ __io_cqring_overflow_flush(ctx, false, NULL, NULL);
+ if (io_cqring_events(ctx))
break;
/*
@@ -2664,6 +2729,8 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
return false;
+ lockdep_assert_held(&req->ctx->uring_lock);
+
ret = io_sq_thread_acquire_mm_files(req->ctx, req);
if (io_resubmit_prep(req, ret)) {
@@ -3493,7 +3560,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
/* read it all, or we did blocking attempt. no retry. */
if (!iov_iter_count(iter) || !force_nonblock ||
- (req->file->f_flags & O_NONBLOCK))
+ (req->file->f_flags & O_NONBLOCK) || !(req->flags & REQ_F_ISREG))
goto done;
io_size -= ret;
@@ -4413,7 +4480,6 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
* io_wq_work.flags, so initialize io_wq_work firstly.
*/
io_req_init_async(req);
- req->work.flags |= IO_WQ_WORK_NO_CANCEL;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
@@ -4446,6 +4512,8 @@ static int io_close(struct io_kiocb *req, bool force_nonblock,
/* if the file has a flush method, be safe and punt to async */
if (close->put_file->f_op->flush && force_nonblock) {
+ /* not safe to cancel at this point */
+ req->work.flags |= IO_WQ_WORK_NO_CANCEL;
/* was never set, but play safe */
req->flags &= ~REQ_F_NOWAIT;
/* avoid grabbing files - we don't need the files */
@@ -5802,6 +5870,12 @@ static int io_timeout(struct io_kiocb *req)
tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
req->timeout.target_seq = tail + off;
+ /* Update the last seq here in case io_flush_timeouts() hasn't.
+ * This is safe because ->completion_lock is held, and submissions
+ * and completions are never mixed in the same ->completion_lock section.
+ */
+ ctx->cq_last_tm_flush = tail;
+
/*
* Insertion sort, ensuring the first entry in the list is always
* the one we need first.
@@ -6096,8 +6170,10 @@ static void io_req_drop_files(struct io_kiocb *req)
struct io_uring_task *tctx = req->task->io_uring;
unsigned long flags;
- put_files_struct(req->work.identity->files);
- put_nsproxy(req->work.identity->nsproxy);
+ if (req->work.flags & IO_WQ_WORK_FILES) {
+ put_files_struct(req->work.identity->files);
+ put_nsproxy(req->work.identity->nsproxy);
+ }
spin_lock_irqsave(&ctx->inflight_lock, flags);
list_del(&req->inflight_entry);
spin_unlock_irqrestore(&ctx->inflight_lock, flags);
@@ -6164,9 +6240,6 @@ static void __io_clean_op(struct io_kiocb *req)
}
req->flags &= ~REQ_F_NEED_CLEANUP;
}
-
- if (req->flags & REQ_F_INFLIGHT)
- io_req_drop_files(req);
}
static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
@@ -6385,6 +6458,15 @@ static struct file *io_file_get(struct io_submit_state *state,
file = __io_file_get(state, fd);
}
+ if (file && file->f_op == &io_uring_fops) {
+ io_req_init_async(req);
+ req->flags |= REQ_F_INFLIGHT;
+
+ spin_lock_irq(&ctx->inflight_lock);
+ list_add(&req->inflight_entry, &ctx->inflight_list);
+ spin_unlock_irq(&ctx->inflight_lock);
+ }
+
return file;
}
@@ -6822,7 +6904,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
/* if we have a backlog and couldn't flush it all, return BUSY */
if (test_bit(0, &ctx->sq_check_overflow)) {
- if (!io_cqring_overflow_flush(ctx, false, NULL, NULL))
+ if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
return -EBUSY;
}
@@ -6924,7 +7006,8 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
if (!list_empty(&ctx->iopoll_list))
io_do_iopoll(ctx, &nr_events, 0);
- if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)))
+ if (to_submit && !ctx->sqo_dead &&
+ likely(!percpu_ref_is_dying(&ctx->refs)))
ret = io_submit_sqes(ctx, to_submit);
mutex_unlock(&ctx->uring_lock);
}
@@ -7025,6 +7108,7 @@ static int io_sq_thread(void *data)
if (sqt_spin || !time_after(jiffies, timeout)) {
io_run_task_work();
+ io_sq_thread_drop_mm_files();
cond_resched();
if (sqt_spin)
timeout = jiffies + sqd->sq_thread_idle;
@@ -7062,6 +7146,7 @@ static int io_sq_thread(void *data)
}
io_run_task_work();
+ io_sq_thread_drop_mm_files();
if (cur_css)
io_sq_thread_unassociate_blkcg();
@@ -7085,7 +7170,7 @@ struct io_wait_queue {
unsigned nr_timeouts;
};
-static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
+static inline bool io_should_wake(struct io_wait_queue *iowq)
{
struct io_ring_ctx *ctx = iowq->ctx;
@@ -7094,7 +7179,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
* started waiting. For timeouts, we always want to return to userspace,
* regardless of event count.
*/
- return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
+ return io_cqring_events(ctx) >= iowq->to_wait ||
atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}
@@ -7104,11 +7189,13 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
wq);
- /* use noflush == true, as we can't safely rely on locking context */
- if (!io_should_wake(iowq, true))
- return -1;
-
- return autoremove_wake_function(curr, mode, wake_flags, key);
+ /*
+ * Cannot safely flush overflowed CQEs from here, ensure we wake up
+ * the task, and the next invocation will do it.
+ */
+ if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
+ return autoremove_wake_function(curr, mode, wake_flags, key);
+ return -1;
}
static int io_run_task_work_sig(void)
@@ -7145,7 +7232,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
int ret = 0;
do {
- if (io_cqring_events(ctx, false) >= min_events)
+ io_cqring_overflow_flush(ctx, false, NULL, NULL);
+ if (io_cqring_events(ctx) >= min_events)
return 0;
if (!io_run_task_work())
break;
@@ -7173,6 +7261,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
trace_io_uring_cqring_wait(ctx, min_events);
do {
+ io_cqring_overflow_flush(ctx, false, NULL, NULL);
prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
TASK_INTERRUPTIBLE);
/* make sure we run task_work before checking for signals */
@@ -7181,8 +7270,10 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
continue;
else if (ret < 0)
break;
- if (io_should_wake(&iowq, false))
+ if (io_should_wake(&iowq))
break;
+ if (test_bit(0, &ctx->cq_check_overflow))
+ continue;
if (uts) {
timeout = schedule_timeout(timeout);
if (timeout == 0) {
@@ -7231,14 +7322,28 @@ static void io_file_ref_kill(struct percpu_ref *ref)
complete(&data->done);
}
+static void io_sqe_files_set_node(struct fixed_file_data *file_data,
+ struct fixed_file_ref_node *ref_node)
+{
+ spin_lock_bh(&file_data->lock);
+ file_data->node = ref_node;
+ list_add_tail(&ref_node->node, &file_data->ref_list);
+ spin_unlock_bh(&file_data->lock);
+ percpu_ref_get(&file_data->refs);
+}
+
static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
struct fixed_file_data *data = ctx->file_data;
- struct fixed_file_ref_node *ref_node = NULL;
+ struct fixed_file_ref_node *backup_node, *ref_node = NULL;
unsigned nr_tables, i;
+ int ret;
if (!data)
return -ENXIO;
+ backup_node = alloc_fixed_file_ref_node(ctx);
+ if (!backup_node)
+ return -ENOMEM;
spin_lock_bh(&data->lock);
ref_node = data->node;
@@ -7250,7 +7355,18 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
/* wait for all refs nodes to complete */
flush_delayed_work(&ctx->file_put_work);
- wait_for_completion(&data->done);
+ do {
+ ret = wait_for_completion_interruptible(&data->done);
+ if (!ret)
+ break;
+ ret = io_run_task_work_sig();
+ if (ret < 0) {
+ percpu_ref_resurrect(&data->refs);
+ reinit_completion(&data->done);
+ io_sqe_files_set_node(data, backup_node);
+ return ret;
+ }
+ } while (1);
__io_sqe_files_unregister(ctx);
nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
@@ -7261,6 +7377,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
kfree(data);
ctx->file_data = NULL;
ctx->nr_user_files = 0;
+ destroy_fixed_file_ref_node(backup_node);
return 0;
}
@@ -7654,12 +7771,12 @@ static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
if (!ref_node)
- return ERR_PTR(-ENOMEM);
+ return NULL;
if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
0, GFP_KERNEL)) {
kfree(ref_node);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
INIT_LIST_HEAD(&ref_node->node);
INIT_LIST_HEAD(&ref_node->file_list);
@@ -7753,16 +7870,12 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
}
ref_node = alloc_fixed_file_ref_node(ctx);
- if (IS_ERR(ref_node)) {
+ if (!ref_node) {
io_sqe_files_unregister(ctx);
- return PTR_ERR(ref_node);
+ return -ENOMEM;
}
- file_data->node = ref_node;
- spin_lock_bh(&file_data->lock);
- list_add_tail(&ref_node->node, &file_data->ref_list);
- spin_unlock_bh(&file_data->lock);
- percpu_ref_get(&file_data->refs);
+ io_sqe_files_set_node(file_data, ref_node);
return ret;
out_fput:
for (i = 0; i < ctx->nr_user_files; i++) {
@@ -7859,8 +7972,8 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
return -EINVAL;
ref_node = alloc_fixed_file_ref_node(ctx);
- if (IS_ERR(ref_node))
- return PTR_ERR(ref_node);
+ if (!ref_node)
+ return -ENOMEM;
done = 0;
fds = u64_to_user_ptr(up->fds);
@@ -7918,11 +8031,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
if (needs_switch) {
percpu_ref_kill(&data->node->refs);
- spin_lock_bh(&data->lock);
- list_add_tail(&ref_node->node, &data->ref_list);
- data->node = ref_node;
- spin_unlock_bh(&data->lock);
- percpu_ref_get(&ctx->file_data->refs);
+ io_sqe_files_set_node(data, ref_node);
} else
destroy_fixed_file_ref_node(ref_node);
@@ -8602,7 +8711,8 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
smp_rmb();
if (!io_sqring_full(ctx))
mask |= EPOLLOUT | EPOLLWRNORM;
- if (io_cqring_events(ctx, false))
+ io_cqring_overflow_flush(ctx, false, NULL, NULL);
+ if (io_cqring_events(ctx))
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
@@ -8641,7 +8751,7 @@ static void io_ring_exit_work(struct work_struct *work)
* as nobody else will be looking for them.
*/
do {
- io_iopoll_try_reap_events(ctx);
+ __io_uring_cancel_task_requests(ctx, NULL);
} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
io_ring_ctx_free(ctx);
}
@@ -8657,10 +8767,14 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
mutex_lock(&ctx->uring_lock);
percpu_ref_kill(&ctx->refs);
+
+ if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
+ ctx->sqo_dead = 1;
+
/* if force is set, the ring is going away. always drop after that */
ctx->cq_overflow_flushed = 1;
if (ctx->rings)
- io_cqring_overflow_flush(ctx, true, NULL, NULL);
+ __io_cqring_overflow_flush(ctx, true, NULL, NULL);
mutex_unlock(&ctx->uring_lock);
io_kill_timeouts(ctx, NULL, NULL);
@@ -8763,8 +8877,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
spin_lock_irq(&ctx->inflight_lock);
list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
- if (req->task != task ||
- req->work.identity->files != files)
+ if (!io_match_task(req, task, files))
continue;
found = true;
break;
@@ -8781,6 +8894,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
io_poll_remove_all(ctx, task, files);
io_kill_timeouts(ctx, task, files);
+ io_cqring_overflow_flush(ctx, true, task, files);
/* cancellations _may_ trigger task work */
io_run_task_work();
schedule();
@@ -8796,9 +8910,11 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
enum io_wq_cancel cret;
bool ret = false;
- cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
- if (cret != IO_WQ_CANCEL_NOTFOUND)
- ret = true;
+ if (ctx->io_wq) {
+ cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
+ &cancel, true);
+ ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
+ }
/* SQPOLL thread does its own polling */
if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
@@ -8817,6 +8933,17 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
}
}
+static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
+{
+ mutex_lock(&ctx->uring_lock);
+ ctx->sqo_dead = 1;
+ mutex_unlock(&ctx->uring_lock);
+
+ /* make sure callers enter the ring to get error */
+ if (ctx->rings)
+ io_ring_set_wakeup_flag(ctx);
+}
+
/*
* We need to iteratively cancel requests, in case a request has dependent
* hard links. These persist even for failure of cancelations, hence keep
@@ -8828,15 +8955,16 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
struct task_struct *task = current;
if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
+ /* for SQPOLL only sqo_task has task notes */
+ WARN_ON_ONCE(ctx->sqo_task != current);
+ io_disable_sqo_submit(ctx);
task = ctx->sq_data->thread;
atomic_inc(&task->io_uring->in_idle);
io_sq_thread_park(ctx->sq_data);
}
io_cancel_defer_files(ctx, task, files);
- io_ring_submit_lock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
io_cqring_overflow_flush(ctx, true, task, files);
- io_ring_submit_unlock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
if (!files)
__io_uring_cancel_task_requests(ctx, task);
@@ -8909,20 +9037,12 @@ static void io_uring_del_task_file(struct file *file)
fput(file);
}
-/*
- * Drop task note for this file if we're the only ones that hold it after
- * pending fput()
- */
-static void io_uring_attempt_task_drop(struct file *file)
+static void io_uring_remove_task_files(struct io_uring_task *tctx)
{
- if (!current->io_uring)
- return;
- /*
- * fput() is pending, will be 2 if the only other ref is our potential
- * task file note. If the task is exiting, drop regardless of count.
- */
- if (fatal_signal_pending(current) || (current->flags & PF_EXITING) ||
- atomic_long_read(&file->f_count) == 2)
+ struct file *file;
+ unsigned long index;
+
+ xa_for_each(&tctx->xa, index, file)
io_uring_del_task_file(file);
}
@@ -8934,16 +9054,12 @@ void __io_uring_files_cancel(struct files_struct *files)
/* make sure overflow events are dropped */
atomic_inc(&tctx->in_idle);
-
- xa_for_each(&tctx->xa, index, file) {
- struct io_ring_ctx *ctx = file->private_data;
-
- io_uring_cancel_task_requests(ctx, files);
- if (files)
- io_uring_del_task_file(file);
- }
-
+ xa_for_each(&tctx->xa, index, file)
+ io_uring_cancel_task_requests(file->private_data, files);
atomic_dec(&tctx->in_idle);
+
+ if (files)
+ io_uring_remove_task_files(tctx);
}
static s64 tctx_inflight(struct io_uring_task *tctx)
@@ -8986,6 +9102,10 @@ void __io_uring_task_cancel(void)
/* make sure overflow events are dropped */
atomic_inc(&tctx->in_idle);
+ /* trigger io_disable_sqo_submit() */
+ if (tctx->sqpoll)
+ __io_uring_files_cancel(NULL);
+
do {
/* read completions before cancelations */
inflight = tctx_inflight(tctx);
@@ -9005,12 +9125,44 @@ void __io_uring_task_cancel(void)
finish_wait(&tctx->wait, &wait);
} while (1);
+ finish_wait(&tctx->wait, &wait);
atomic_dec(&tctx->in_idle);
+
+ io_uring_remove_task_files(tctx);
}
static int io_uring_flush(struct file *file, void *data)
{
- io_uring_attempt_task_drop(file);
+ struct io_uring_task *tctx = current->io_uring;
+ struct io_ring_ctx *ctx = file->private_data;
+
+ if (!tctx)
+ return 0;
+
+ /* we should have cancelled and erased it before PF_EXITING */
+ WARN_ON_ONCE((current->flags & PF_EXITING) &&
+ xa_load(&tctx->xa, (unsigned long)file));
+
+ /*
+ * fput() is pending; f_count will be 2 if the only other ref is our
+ * potential task file note. If the task is exiting, drop regardless of count.
+ */
+ if (atomic_long_read(&file->f_count) != 2)
+ return 0;
+
+ if (ctx->flags & IORING_SETUP_SQPOLL) {
+ /* there is only one file note, which is owned by sqo_task */
+ WARN_ON_ONCE(ctx->sqo_task != current &&
+ xa_load(&tctx->xa, (unsigned long)file));
+ /* sqo_dead check is for when this happens after cancellation */
+ WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
+ !xa_load(&tctx->xa, (unsigned long)file));
+
+ io_disable_sqo_submit(ctx);
+ }
+
+ if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
+ io_uring_del_task_file(file);
return 0;
}
@@ -9084,8 +9236,9 @@ static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
#endif /* !CONFIG_MMU */
-static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
+static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
+ int ret = 0;
DEFINE_WAIT(wait);
do {
@@ -9094,6 +9247,11 @@ static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
+ if (unlikely(ctx->sqo_dead)) {
+ ret = -EOWNERDEAD;
+ goto out;
+ }
+
if (!io_sqring_full(ctx))
break;
@@ -9101,6 +9259,8 @@ static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
} while (!signal_pending(current));
finish_wait(&ctx->sqo_sq_wait, &wait);
+out:
+ return ret;
}
static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
@@ -9172,17 +9332,18 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
*/
ret = 0;
if (ctx->flags & IORING_SETUP_SQPOLL) {
- if (!list_empty_careful(&ctx->cq_overflow_list)) {
- bool needs_lock = ctx->flags & IORING_SETUP_IOPOLL;
+ io_cqring_overflow_flush(ctx, false, NULL, NULL);
- io_ring_submit_lock(ctx, needs_lock);
- io_cqring_overflow_flush(ctx, false, NULL, NULL);
- io_ring_submit_unlock(ctx, needs_lock);
- }
+ ret = -EOWNERDEAD;
+ if (unlikely(ctx->sqo_dead))
+ goto out;
if (flags & IORING_ENTER_SQ_WAKEUP)
wake_up(&ctx->sq_data->wait);
- if (flags & IORING_ENTER_SQ_WAIT)
- io_sqpoll_wait_sq(ctx);
+ if (flags & IORING_ENTER_SQ_WAIT) {
+ ret = io_sqpoll_wait_sq(ctx);
+ if (ret)
+ goto out;
+ }
submitted = to_submit;
} else if (to_submit) {
ret = io_uring_add_task_file(ctx, f.file);
@@ -9601,6 +9762,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
*/
ret = io_uring_install_fd(ctx, file);
if (ret < 0) {
+ io_disable_sqo_submit(ctx);
/* fput will clean it up */
fput(file);
return ret;
@@ -9609,6 +9771,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
return ret;
err:
+ io_disable_sqo_submit(ctx);
io_ring_ctx_wait_and_kill(ctx);
return ret;
}
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index f277d023ebcd..c75719312147 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -14,6 +14,7 @@
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/fsnotify.h>
+#include <linux/uio.h>
#include "kernfs-internal.h"
@@ -180,11 +181,10 @@ static const struct seq_operations kernfs_seq_ops = {
* it difficult to use seq_file. Implement simplistic custom buffering for
* bin files.
*/
-static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
- char __user *user_buf, size_t count,
- loff_t *ppos)
+static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
- ssize_t len = min_t(size_t, count, PAGE_SIZE);
+ struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
+ ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
const struct kernfs_ops *ops;
char *buf;
@@ -210,7 +210,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
of->event = atomic_read(&of->kn->attr.open->event);
ops = kernfs_ops(of->kn);
if (ops->read)
- len = ops->read(of, buf, len, *ppos);
+ len = ops->read(of, buf, len, iocb->ki_pos);
else
len = -EINVAL;
@@ -220,12 +220,12 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
if (len < 0)
goto out_free;
- if (copy_to_user(user_buf, buf, len)) {
+ if (copy_to_iter(buf, len, iter) != len) {
len = -EFAULT;
goto out_free;
}
- *ppos += len;
+ iocb->ki_pos += len;
out_free:
if (buf == of->prealloc_buf)
@@ -235,31 +235,14 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
return len;
}
-/**
- * kernfs_fop_read - kernfs vfs read callback
- * @file: file pointer
- * @user_buf: data to write
- * @count: number of bytes
- * @ppos: starting offset
- */
-static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
- struct kernfs_open_file *of = kernfs_of(file);
-
- if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
- return seq_read(file, user_buf, count, ppos);
- else
- return kernfs_file_direct_read(of, user_buf, count, ppos);
+ if (kernfs_of(iocb->ki_filp)->kn->flags & KERNFS_HAS_SEQ_SHOW)
+ return seq_read_iter(iocb, iter);
+ return kernfs_file_read_iter(iocb, iter);
}
-/**
- * kernfs_fop_write - kernfs vfs write callback
- * @file: file pointer
- * @user_buf: data to write
- * @count: number of bytes
- * @ppos: starting offset
- *
+/*
* Copy data in from userland and pass it to the matching kernfs write
* operation.
*
@@ -269,20 +252,18 @@ static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
* modify only the value you're changing, then write the entire buffer
* back.
*/
-static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
- struct kernfs_open_file *of = kernfs_of(file);
+ struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
+ ssize_t len = iov_iter_count(iter);
const struct kernfs_ops *ops;
- ssize_t len;
char *buf;
if (of->atomic_write_len) {
- len = count;
if (len > of->atomic_write_len)
return -E2BIG;
} else {
- len = min_t(size_t, count, PAGE_SIZE);
+ len = min_t(size_t, len, PAGE_SIZE);
}
buf = of->prealloc_buf;
@@ -293,7 +274,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
if (!buf)
return -ENOMEM;
- if (copy_from_user(buf, user_buf, len)) {
+ if (copy_from_iter(buf, len, iter) != len) {
len = -EFAULT;
goto out_free;
}
@@ -312,7 +293,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
ops = kernfs_ops(of->kn);
if (ops->write)
- len = ops->write(of, buf, len, *ppos);
+ len = ops->write(of, buf, len, iocb->ki_pos);
else
len = -EINVAL;
@@ -320,7 +301,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
mutex_unlock(&of->mutex);
if (len > 0)
- *ppos += len;
+ iocb->ki_pos += len;
out_free:
if (buf == of->prealloc_buf)
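The read-modify-write hint in the comment above kernfs_fop_write_iter() translates to user space roughly as follows. This is only a sketch: the attribute path is hypothetical, and a real attribute may impose its own format and length limits:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical kernfs-backed attribute; substitute a real sysfs path */
	const char *path = "/sys/class/example/attr";
	char buf[256];

	int fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	ssize_t n = read(fd, buf, sizeof(buf) - 1);	/* read the whole current value */
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}
	buf[n] = '\0';

	/* modify the value in buf here, then write the entire buffer back at once */
	if (pwrite(fd, buf, strlen(buf), 0) < 0)
		perror("pwrite");

	close(fd);
	return 0;
}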
@@ -673,7 +654,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
/*
* Write path needs atomic_write_len outside of an active reference.
- * Cache it in open_file. See kernfs_fop_write() for details.
+ * Cache it in open_file. See kernfs_fop_write_iter() for details.
*/
of->atomic_write_len = ops->atomic_write_len;
@@ -960,14 +941,16 @@ void kernfs_notify(struct kernfs_node *kn)
EXPORT_SYMBOL_GPL(kernfs_notify);
const struct file_operations kernfs_file_fops = {
- .read = kernfs_fop_read,
- .write = kernfs_fop_write,
+ .read_iter = kernfs_fop_read_iter,
+ .write_iter = kernfs_fop_write_iter,
.llseek = generic_file_llseek,
.mmap = kernfs_fop_mmap,
.open = kernfs_fop_open,
.release = kernfs_fop_release,
.poll = kernfs_fop_poll,
.fsync = noop_fsync,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
};
/**
diff --git a/fs/namespace.c b/fs/namespace.c
index d2db7dfe232b..9d33909d0f9e 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1713,8 +1713,6 @@ static int can_umount(const struct path *path, int flags)
{
struct mount *mnt = real_mount(path->mnt);
- if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
- return -EINVAL;
if (!may_mount())
return -EPERM;
if (path->dentry != path->mnt->mnt_root)
@@ -1728,6 +1726,7 @@ static int can_umount(const struct path *path, int flags)
return 0;
}
+// caller is responsible for flags being sane
int path_umount(struct path *path, int flags)
{
struct mount *mnt = real_mount(path->mnt);
@@ -1749,6 +1748,10 @@ static int ksys_umount(char __user *name, int flags)
struct path path;
int ret;
+ // basic validity checks done first
+ if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
+ return -EINVAL;
+
if (!(flags & UMOUNT_NOFOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 816e1427f17e..04bf8066980c 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -1011,22 +1011,24 @@ nfs_delegation_find_inode_server(struct nfs_server *server,
const struct nfs_fh *fhandle)
{
struct nfs_delegation *delegation;
- struct inode *freeme, *res = NULL;
+ struct super_block *freeme = NULL;
+ struct inode *res = NULL;
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
spin_lock(&delegation->lock);
if (delegation->inode != NULL &&
!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
- freeme = igrab(delegation->inode);
- if (freeme && nfs_sb_active(freeme->i_sb))
- res = freeme;
+ if (nfs_sb_active(server->super)) {
+ freeme = server->super;
+ res = igrab(delegation->inode);
+ }
spin_unlock(&delegation->lock);
if (res != NULL)
return res;
if (freeme) {
rcu_read_unlock();
- iput(freeme);
+ nfs_sb_deactive(freeme);
rcu_read_lock();
}
return ERR_PTR(-EAGAIN);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index b840d0a91c9d..62d3189745cd 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -136,9 +136,29 @@ struct nfs_fs_context {
} clone_data;
};
-#define nfs_errorf(fc, fmt, ...) errorf(fc, fmt, ## __VA_ARGS__)
-#define nfs_invalf(fc, fmt, ...) invalf(fc, fmt, ## __VA_ARGS__)
-#define nfs_warnf(fc, fmt, ...) warnf(fc, fmt, ## __VA_ARGS__)
+#define nfs_errorf(fc, fmt, ...) ((fc)->log.log ? \
+ errorf(fc, fmt, ## __VA_ARGS__) : \
+ ({ dprintk(fmt "\n", ## __VA_ARGS__); }))
+
+#define nfs_ferrorf(fc, fac, fmt, ...) ((fc)->log.log ? \
+ errorf(fc, fmt, ## __VA_ARGS__) : \
+ ({ dfprintk(fac, fmt "\n", ## __VA_ARGS__); }))
+
+#define nfs_invalf(fc, fmt, ...) ((fc)->log.log ? \
+ invalf(fc, fmt, ## __VA_ARGS__) : \
+ ({ dprintk(fmt "\n", ## __VA_ARGS__); -EINVAL; }))
+
+#define nfs_finvalf(fc, fac, fmt, ...) ((fc)->log.log ? \
+ invalf(fc, fmt, ## __VA_ARGS__) : \
+ ({ dfprintk(fac, fmt "\n", ## __VA_ARGS__); -EINVAL; }))
+
+#define nfs_warnf(fc, fmt, ...) ((fc)->log.log ? \
+ warnf(fc, fmt, ## __VA_ARGS__) : \
+ ({ dprintk(fmt "\n", ## __VA_ARGS__); }))
+
+#define nfs_fwarnf(fc, fac, fmt, ...) ((fc)->log.log ? \
+ warnf(fc, fmt, ## __VA_ARGS__) : \
+ ({ dfprintk(fac, fmt "\n", ## __VA_ARGS__); }))
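These macros fall back to dprintk()/dfprintk() when the fs_context carries no log, so mount errors are still visible in debug output rather than being dropped. A rough user-space analogue of the same pattern, with invented names rather than the kernel API, could look like:

#include <stdio.h>

struct ctx {
	FILE *log;	/* stands in for fc->log.log; may be NULL */
};

/* log to the caller's buffer if one is attached, otherwise to debug output */
#define ctx_errorf(c, fmt, ...) ((c)->log ?				\
	(void)fprintf((c)->log, fmt "\n", ##__VA_ARGS__) :		\
	(void)fprintf(stderr, "debug: " fmt "\n", ##__VA_ARGS__))

int main(void)
{
	struct ctx with_log = { .log = stdout };
	struct ctx without_log = { .log = NULL };

	ctx_errorf(&with_log, "couldn't follow remote path to %s", "/export");
	ctx_errorf(&without_log, "couldn't follow remote path to %s", "/export");
	return 0;
}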
static inline struct nfs_fs_context *nfs_fc2context(const struct fs_context *fc)
{
@@ -579,12 +599,14 @@ extern void nfs4_test_session_trunk(struct rpc_clnt *clnt,
static inline struct inode *nfs_igrab_and_active(struct inode *inode)
{
- inode = igrab(inode);
- if (inode != NULL && !nfs_sb_active(inode->i_sb)) {
- iput(inode);
- inode = NULL;
+ struct super_block *sb = inode->i_sb;
+
+ if (sb && nfs_sb_active(sb)) {
+ if (igrab(inode))
+ return inode;
+ nfs_sb_deactive(sb);
}
- return inode;
+ return NULL;
}
static inline void nfs_iput_and_deactive(struct inode *inode)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 0ce04e0e5d82..2f4679a62712 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3536,10 +3536,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
/* Handle Layoutreturn errors */
- if (pnfs_roc_done(task, calldata->inode,
- &calldata->arg.lr_args,
- &calldata->res.lr_res,
- &calldata->res.lr_ret) == -EAGAIN)
+ if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
+ &calldata->res.lr_ret) == -EAGAIN)
goto out_restart;
/* hmm. we are done with the inode, and in the process of freeing
@@ -6384,10 +6382,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
/* Handle Layoutreturn errors */
- if (pnfs_roc_done(task, data->inode,
- &data->args.lr_args,
- &data->res.lr_res,
- &data->res.lr_ret) == -EAGAIN)
+ if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
+ &data->res.lr_ret) == -EAGAIN)
goto out_restart;
switch (task->tk_status) {
@@ -6441,10 +6437,10 @@ static void nfs4_delegreturn_release(void *calldata)
struct nfs4_delegreturndata *data = calldata;
struct inode *inode = data->inode;
+ if (data->lr.roc)
+ pnfs_roc_release(&data->lr.arg, &data->lr.res,
+ data->res.lr_ret);
if (inode) {
- if (data->lr.roc)
- pnfs_roc_release(&data->lr.arg, &data->lr.res,
- data->res.lr_ret);
nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
nfs_iput_and_deactive(inode);
}
@@ -6520,16 +6516,14 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
nfs_fattr_init(data->res.fattr);
data->timestamp = jiffies;
data->rpc_status = 0;
- data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, cred);
data->inode = nfs_igrab_and_active(inode);
- if (data->inode) {
+ if (data->inode || issync) {
+ data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
+ cred);
if (data->lr.roc) {
data->args.lr_args = &data->lr.arg;
data->res.lr_res = &data->lr.res;
}
- } else if (data->lr.roc) {
- pnfs_roc_release(&data->lr.arg, &data->lr.res, 0);
- data->lr.roc = false;
}
task_setup_data.callback_data = data;
@@ -7111,9 +7105,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
data->arg.new_lock_owner, ret);
} else
data->cancelled = true;
+ trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
rpc_put_task(task);
dprintk("%s: done, ret = %d!\n", __func__, ret);
- trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
return ret;
}
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index 984cc42ee54d..d09bcfd7db89 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -227,7 +227,7 @@ int nfs4_try_get_tree(struct fs_context *fc)
fc, ctx->nfs_server.hostname,
ctx->nfs_server.export_path);
if (err) {
- nfs_errorf(fc, "NFS4: Couldn't follow remote path");
+ nfs_ferrorf(fc, MOUNT, "NFS4: Couldn't follow remote path");
dfprintk(MOUNT, "<-- nfs4_try_get_tree() = %d [error]\n", err);
} else {
dfprintk(MOUNT, "<-- nfs4_try_get_tree() = 0\n");
@@ -250,7 +250,7 @@ int nfs4_get_referral_tree(struct fs_context *fc)
fc, ctx->nfs_server.hostname,
ctx->nfs_server.export_path);
if (err) {
- nfs_errorf(fc, "NFS4: Couldn't follow remote path");
+ nfs_ferrorf(fc, MOUNT, "NFS4: Couldn't follow remote path");
dfprintk(MOUNT, "<-- nfs4_get_referral_tree() = %d [error]\n", err);
} else {
dfprintk(MOUNT, "<-- nfs4_get_referral_tree() = 0\n");
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 07f59dc8cb2e..4f274f21c4ab 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1152,7 +1152,7 @@ void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
LIST_HEAD(freeme);
spin_lock(&inode->i_lock);
- if (!pnfs_layout_is_valid(lo) || !arg_stateid ||
+ if (!pnfs_layout_is_valid(lo) ||
!nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
goto out_unlock;
if (stateid) {
@@ -1509,10 +1509,8 @@ out_noroc:
return false;
}
-int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
- struct nfs4_layoutreturn_args **argpp,
- struct nfs4_layoutreturn_res **respp,
- int *ret)
+int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
+ struct nfs4_layoutreturn_res **respp, int *ret)
{
struct nfs4_layoutreturn_args *arg = *argpp;
int retval = -EAGAIN;
@@ -1545,7 +1543,7 @@ int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
return 0;
case -NFS4ERR_OLD_STATEID:
if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
- &arg->range, inode))
+ &arg->range, arg->inode))
break;
*ret = -NFS4ERR_NOMATCHING_LAYOUT;
return -EAGAIN;
@@ -1560,23 +1558,28 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
int ret)
{
struct pnfs_layout_hdr *lo = args->layout;
- const nfs4_stateid *arg_stateid = NULL;
+ struct inode *inode = args->inode;
const nfs4_stateid *res_stateid = NULL;
struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
switch (ret) {
case -NFS4ERR_NOMATCHING_LAYOUT:
+ spin_lock(&inode->i_lock);
+ if (pnfs_layout_is_valid(lo) &&
+ nfs4_stateid_match_other(&args->stateid, &lo->plh_stateid))
+ pnfs_set_plh_return_info(lo, args->range.iomode, 0);
+ pnfs_clear_layoutreturn_waitbit(lo);
+ spin_unlock(&inode->i_lock);
break;
case 0:
if (res->lrs_present)
res_stateid = &res->stateid;
fallthrough;
default:
- arg_stateid = &args->stateid;
+ pnfs_layoutreturn_free_lsegs(lo, &args->stateid, &args->range,
+ res_stateid);
}
trace_nfs4_layoutreturn_on_close(args->inode, &args->stateid, ret);
- pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
- res_stateid);
if (ld_private && ld_private->ops && ld_private->ops->free)
ld_private->ops->free(ld_private);
pnfs_put_layout_hdr(lo);
@@ -2015,6 +2018,27 @@ lookup_again:
goto lookup_again;
}
+ /*
+ * Because we free lsegs when sending LAYOUTRETURN, we need to wait
+ * for LAYOUTRETURN.
+ */
+ if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
+ spin_unlock(&ino->i_lock);
+ dprintk("%s wait for layoutreturn\n", __func__);
+ lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
+ if (!IS_ERR(lseg)) {
+ pnfs_put_layout_hdr(lo);
+ dprintk("%s retrying\n", __func__);
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+ lseg,
+ PNFS_UPDATE_LAYOUT_RETRY);
+ goto lookup_again;
+ }
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
+ PNFS_UPDATE_LAYOUT_RETURN);
+ goto out_put_layout_hdr;
+ }
+
lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
if (lseg) {
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
@@ -2067,28 +2091,6 @@ lookup_again:
nfs4_stateid_copy(&stateid, &lo->plh_stateid);
}
- /*
- * Because we free lsegs before sending LAYOUTRETURN, we need to wait
- * for LAYOUTRETURN even if first is true.
- */
- if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
- spin_unlock(&ino->i_lock);
- dprintk("%s wait for layoutreturn\n", __func__);
- lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
- if (!IS_ERR(lseg)) {
- if (first)
- pnfs_clear_first_layoutget(lo);
- pnfs_put_layout_hdr(lo);
- dprintk("%s retrying\n", __func__);
- trace_pnfs_update_layout(ino, pos, count, iomode, lo,
- lseg, PNFS_UPDATE_LAYOUT_RETRY);
- goto lookup_again;
- }
- trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
- PNFS_UPDATE_LAYOUT_RETURN);
- goto out_put_layout_hdr;
- }
-
if (pnfs_layoutgets_blocked(lo)) {
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_BLOCKED);
@@ -2242,6 +2244,7 @@ static void _lgopen_prepare_attached(struct nfs4_opendata *data,
&rng, GFP_KERNEL);
if (!lgp) {
pnfs_clear_first_layoutget(lo);
+ nfs_layoutget_end(lo);
pnfs_put_layout_hdr(lo);
return;
}
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index bbd3de1025f2..d810ae674f4e 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -297,10 +297,8 @@ bool pnfs_roc(struct inode *ino,
struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
const struct cred *cred);
-int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
- struct nfs4_layoutreturn_args **argpp,
- struct nfs4_layoutreturn_res **respp,
- int *ret);
+int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
+ struct nfs4_layoutreturn_res **respp, int *ret);
void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
int ret);
@@ -772,7 +770,7 @@ pnfs_roc(struct inode *ino,
}
static inline int
-pnfs_roc_done(struct rpc_task *task, struct inode *inode,
+pnfs_roc_done(struct rpc_task *task,
struct nfs4_layoutreturn_args **argpp,
struct nfs4_layoutreturn_res **respp,
int *ret)
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 2efcfdd348a1..49d3389bd813 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -78,22 +78,18 @@ void
pnfs_generic_clear_request_commit(struct nfs_page *req,
struct nfs_commit_info *cinfo)
{
- struct pnfs_layout_segment *freeme = NULL;
+ struct pnfs_commit_bucket *bucket = NULL;
if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
goto out;
cinfo->ds->nwritten--;
- if (list_is_singular(&req->wb_list)) {
- struct pnfs_commit_bucket *bucket;
-
+ if (list_is_singular(&req->wb_list))
bucket = list_first_entry(&req->wb_list,
- struct pnfs_commit_bucket,
- written);
- freeme = pnfs_free_bucket_lseg(bucket);
- }
+ struct pnfs_commit_bucket, written);
out:
nfs_request_remove_commit_list(req, cinfo);
- pnfs_put_lseg(freeme);
+ if (bucket)
+ pnfs_put_lseg(pnfs_free_bucket_lseg(bucket));
}
EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);
@@ -407,12 +403,16 @@ pnfs_bucket_get_committing(struct list_head *head,
struct pnfs_commit_bucket *bucket,
struct nfs_commit_info *cinfo)
{
+ struct pnfs_layout_segment *lseg;
struct list_head *pos;
list_for_each(pos, &bucket->committing)
cinfo->ds->ncommitting--;
list_splice_init(&bucket->committing, head);
- return pnfs_free_bucket_lseg(bucket);
+ lseg = pnfs_free_bucket_lseg(bucket);
+ if (!lseg)
+ lseg = pnfs_get_lseg(bucket->lseg);
+ return lseg;
}
static struct nfs_commit_data *
@@ -424,8 +424,6 @@ pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket,
if (!data)
return NULL;
data->lseg = pnfs_bucket_get_committing(&data->pages, bucket, cinfo);
- if (!data->lseg)
- data->lseg = pnfs_get_lseg(bucket->lseg);
return data;
}
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 821db21ba072..34b880211e5e 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -865,9 +865,14 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
if (isdotent(name, namlen)) {
if (namlen == 2) {
dchild = dget_parent(dparent);
- /* filesystem root - cannot return filehandle for ".." */
+ /*
+ * Don't return filehandle for ".." if we're at
+ * the filesystem or export root:
+ */
if (dchild == dparent)
goto out;
+ if (dparent == exp->ex_path.dentry)
+ goto out;
} else
dchild = dget(dparent);
} else
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 4727b7f03c5b..8d6d2678abad 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -50,6 +50,11 @@
#include "pnfs.h"
#include "trace.h"
+static bool inter_copy_offload_enable;
+module_param(inter_copy_offload_enable, bool, 0644);
+MODULE_PARM_DESC(inter_copy_offload_enable,
+ "Enable inter server to server copy offload. Default: false");
+
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
#include <linux/security.h>
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 45ee6b12ce5b..eaaa1605b5b5 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -147,6 +147,25 @@ svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, u32 len)
return p;
}
+static void *
+svcxdr_savemem(struct nfsd4_compoundargs *argp, __be32 *p, u32 len)
+{
+ __be32 *tmp;
+
+ /*
+ * The location of the decoded data item is stable,
+ * so @p is OK to use. This is the common case.
+ */
+ if (p != argp->xdr->scratch.iov_base)
+ return p;
+
+ tmp = svcxdr_tmpalloc(argp, len);
+ if (!tmp)
+ return NULL;
+ memcpy(tmp, p, len);
+ return tmp;
+}
+
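svcxdr_savemem() copies only when the inline-decoded pointer aliases the xdr stream's scratch buffer (the case where the item straddled a page boundary); data decoded in place stays valid for the life of the compound and needs no copy. A standalone sketch of the same decision, with invented names rather than the sunrpc API, might be:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct stream {
	char scratch[64];	/* reused by every decode, like xdr->scratch */
};

/*
 * Return a stable reference to @p: if it points at the stream's scratch
 * buffer the bytes will be overwritten by the next decode, so duplicate
 * them; otherwise the data already lives in stable memory (the common
 * case) and can be used as-is.
 */
static void *savemem(struct stream *xs, void *p, size_t len)
{
	void *tmp;

	if (p != xs->scratch)
		return p;
	tmp = malloc(len);
	if (!tmp)
		return NULL;
	memcpy(tmp, p, len);
	return tmp;
}

int main(void)
{
	struct stream xs;
	char stable[] = "lives outside the scratch buffer";

	strcpy(xs.scratch, "straddled a page, reassembled in scratch");

	char *a = savemem(&xs, stable, sizeof(stable));
	char *b = savemem(&xs, xs.scratch, strlen(xs.scratch) + 1);

	printf("%s (copied: %s)\n", a, a == stable ? "no" : "yes");
	printf("%s (copied: %s)\n", b, b == xs.scratch ? "no" : "yes");
	if (b != xs.scratch)
		free(b);
	return 0;
}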
/*
* NFSv4 basic data type decoders
*/
@@ -183,11 +202,10 @@ nfsd4_decode_opaque(struct nfsd4_compoundargs *argp, struct xdr_netobj *o)
p = xdr_inline_decode(argp->xdr, len);
if (!p)
return nfserr_bad_xdr;
- o->data = svcxdr_tmpalloc(argp, len);
+ o->data = svcxdr_savemem(argp, p, len);
if (!o->data)
return nfserr_jukebox;
o->len = len;
- memcpy(o->data, p, len);
return nfs_ok;
}
@@ -205,10 +223,9 @@ nfsd4_decode_component4(struct nfsd4_compoundargs *argp, char **namp, u32 *lenp)
status = check_filename((char *)p, *lenp);
if (status)
return status;
- *namp = svcxdr_tmpalloc(argp, *lenp);
+ *namp = svcxdr_savemem(argp, p, *lenp);
if (!*namp)
return nfserr_jukebox;
- memcpy(*namp, p, *lenp);
return nfs_ok;
}
@@ -1200,10 +1217,9 @@ nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
p = xdr_inline_decode(argp->xdr, putfh->pf_fhlen);
if (!p)
return nfserr_bad_xdr;
- putfh->pf_fhval = svcxdr_tmpalloc(argp, putfh->pf_fhlen);
+ putfh->pf_fhval = svcxdr_savemem(argp, p, putfh->pf_fhlen);
if (!putfh->pf_fhval)
return nfserr_jukebox;
- memcpy(putfh->pf_fhval, p, putfh->pf_fhlen);
return nfs_ok;
}
@@ -1318,24 +1334,20 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
p = xdr_inline_decode(argp->xdr, setclientid->se_callback_netid_len);
if (!p)
return nfserr_bad_xdr;
- setclientid->se_callback_netid_val = svcxdr_tmpalloc(argp,
+ setclientid->se_callback_netid_val = svcxdr_savemem(argp, p,
setclientid->se_callback_netid_len);
if (!setclientid->se_callback_netid_val)
return nfserr_jukebox;
- memcpy(setclientid->se_callback_netid_val, p,
- setclientid->se_callback_netid_len);
if (xdr_stream_decode_u32(argp->xdr, &setclientid->se_callback_addr_len) < 0)
return nfserr_bad_xdr;
p = xdr_inline_decode(argp->xdr, setclientid->se_callback_addr_len);
if (!p)
return nfserr_bad_xdr;
- setclientid->se_callback_addr_val = svcxdr_tmpalloc(argp,
+ setclientid->se_callback_addr_val = svcxdr_savemem(argp, p,
setclientid->se_callback_addr_len);
if (!setclientid->se_callback_addr_val)
return nfserr_jukebox;
- memcpy(setclientid->se_callback_addr_val, p,
- setclientid->se_callback_addr_len);
if (xdr_stream_decode_u32(argp->xdr, &setclientid->se_callback_ident) < 0)
return nfserr_bad_xdr;
@@ -1375,10 +1387,9 @@ nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify
p = xdr_inline_decode(argp->xdr, verify->ve_attrlen);
if (!p)
return nfserr_bad_xdr;
- verify->ve_attrval = svcxdr_tmpalloc(argp, verify->ve_attrlen);
+ verify->ve_attrval = svcxdr_savemem(argp, p, verify->ve_attrlen);
if (!verify->ve_attrval)
return nfserr_jukebox;
- memcpy(verify->ve_attrval, p, verify->ve_attrlen);
return nfs_ok;
}
@@ -2333,10 +2344,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
p = xdr_inline_decode(argp->xdr, argp->taglen);
if (!p)
return 0;
- argp->tag = svcxdr_tmpalloc(argp, argp->taglen);
+ argp->tag = svcxdr_savemem(argp, p, argp->taglen);
if (!argp->tag)
return 0;
- memcpy(argp->tag, p, argp->taglen);
max_reply += xdr_align_size(argp->taglen);
}
@@ -4756,6 +4766,7 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
resp->rqstp->rq_vec, read->rd_vlen, maxcount, eof);
if (nfserr)
return nfserr;
+ xdr_truncate_encode(xdr, starting_len + 16 + xdr_align_size(*maxcount));
tmp = htonl(NFS4_CONTENT_DATA);
write_bytes_to_xdr_buf(xdr->buf, starting_len, &tmp, 4);
@@ -4763,6 +4774,10 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp64, 8);
tmp = htonl(*maxcount);
write_bytes_to_xdr_buf(xdr->buf, starting_len + 12, &tmp, 4);
+
+ tmp = xdr_zero;
+ write_bytes_to_xdr_buf(xdr->buf, starting_len + 16 + *maxcount, &tmp,
+ xdr_pad_size(*maxcount));
return nfs_ok;
}
@@ -4855,14 +4870,15 @@ out:
if (nfserr && segments == 0)
xdr_truncate_encode(xdr, starting_len);
else {
- tmp = htonl(eof);
- write_bytes_to_xdr_buf(xdr->buf, starting_len, &tmp, 4);
- tmp = htonl(segments);
- write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
if (nfserr) {
xdr_truncate_encode(xdr, last_segment);
nfserr = nfs_ok;
+ eof = 0;
}
+ tmp = htonl(eof);
+ write_bytes_to_xdr_buf(xdr->buf, starting_len, &tmp, 4);
+ tmp = htonl(segments);
+ write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
}
return nfserr;
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 00384c332f9b..f9c9f4c63cc7 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -33,12 +33,6 @@
#define NFSDDBG_FACILITY NFSDDBG_SVC
-bool inter_copy_offload_enable;
-EXPORT_SYMBOL_GPL(inter_copy_offload_enable);
-module_param(inter_copy_offload_enable, bool, 0644);
-MODULE_PARM_DESC(inter_copy_offload_enable,
- "Enable inter server to server copy offload. Default: false");
-
extern struct svc_program nfsd_program;
static int nfsd(void *vrqstp);
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index a60ff5ce1a37..c300885ae75d 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -568,7 +568,6 @@ struct nfsd4_copy {
struct nfs_fh c_fh;
nfs4_stateid stateid;
};
-extern bool inter_copy_offload_enable;
struct nfsd4_seek {
/* request */
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 3e01d8f2ab90..dcab112e1f00 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -1285,26 +1285,23 @@ fput_and_out:
return ret;
}
+#ifndef CONFIG_ARCH_SPLIT_ARG64
SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
__u64, mask, int, dfd,
const char __user *, pathname)
{
return do_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
}
+#endif
-#ifdef CONFIG_COMPAT
-COMPAT_SYSCALL_DEFINE6(fanotify_mark,
+#if defined(CONFIG_ARCH_SPLIT_ARG64) || defined(CONFIG_COMPAT)
+SYSCALL32_DEFINE6(fanotify_mark,
int, fanotify_fd, unsigned int, flags,
- __u32, mask0, __u32, mask1, int, dfd,
+ SC_ARG64(mask), int, dfd,
const char __user *, pathname)
{
- return do_fanotify_mark(fanotify_fd, flags,
-#ifdef __BIG_ENDIAN
- ((__u64)mask0 << 32) | mask1,
-#else
- ((__u64)mask1 << 32) | mask0,
-#endif
- dfd, pathname);
+ return do_fanotify_mark(fanotify_fd, flags, SC_VAL64(__u64, mask),
+ dfd, pathname);
}
#endif
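
A brief illustration of what the SC_ARG64()/SC_VAL64() conversion above expresses: on 32-bit ABIs a __u64 syscall argument arrives as two 32-bit halves, and the wrapper macros reassemble it in place of the removed open-coded big/little-endian branches. A minimal userspace sketch of the same reassembly (join_u64() and the sample values are made up for illustration, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* combine the two 32-bit halves of a 64-bit syscall argument */
static uint64_t join_u64(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* e.g. mask = 0x0123456789abcdef split across two registers */
	printf("0x%llx\n",
	       (unsigned long long)join_u64(0x89abcdefu, 0x01234567u));
	return 0;
}

Which half is "low" depends on the architecture's calling convention and endianness, which is exactly the detail the generic SC_ARG64()/SC_VAL64() macros hide from the syscall body.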
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 317899222d7f..d2018f70d1fa 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1770,6 +1770,12 @@ static int process_sysctl_arg(char *param, char *val,
return 0;
}
+ if (!val)
+ return -EINVAL;
+ len = strlen(val);
+ if (len == 0)
+ return -EINVAL;
+
/*
* To set sysctl options, we use a temporary mount of proc, look up the
* respective sys/ file and write to it. To avoid mounting it when no
@@ -1811,7 +1817,6 @@ static int process_sysctl_arg(char *param, char *val,
file, param, val);
goto out;
}
- len = strlen(val);
wret = kernel_write(file, val, len, &pos);
if (wret < 0) {
err = wret;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ee5a235b3056..602e3a52884d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1035,6 +1035,25 @@ struct clear_refs_private {
};
#ifdef CONFIG_MEM_SOFT_DIRTY
+
+#define is_cow_mapping(flags) (((flags) & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE)
+
+static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+ struct page *page;
+
+ if (!pte_write(pte))
+ return false;
+ if (!is_cow_mapping(vma->vm_flags))
+ return false;
+ if (likely(!atomic_read(&vma->vm_mm->has_pinned)))
+ return false;
+ page = vm_normal_page(vma, addr, pte);
+ if (!page)
+ return false;
+ return page_maybe_dma_pinned(page);
+}
+
static inline void clear_soft_dirty(struct vm_area_struct *vma,
unsigned long addr, pte_t *pte)
{
@@ -1049,6 +1068,8 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
if (pte_present(ptent)) {
pte_t old_pte;
+ if (pte_is_pinned(vma, addr, ptent))
+ return;
old_pte = ptep_modify_prot_start(vma, addr, pte);
ptent = pte_wrprotect(old_pte);
ptent = pte_clear_soft_dirty(ptent);
@@ -1215,41 +1236,26 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
.type = type,
};
+ if (mmap_write_lock_killable(mm)) {
+ count = -EINTR;
+ goto out_mm;
+ }
if (type == CLEAR_REFS_MM_HIWATER_RSS) {
- if (mmap_write_lock_killable(mm)) {
- count = -EINTR;
- goto out_mm;
- }
-
/*
* Writing 5 to /proc/pid/clear_refs resets the peak
* resident set size to this mm's current rss value.
*/
reset_mm_hiwater_rss(mm);
- mmap_write_unlock(mm);
- goto out_mm;
+ goto out_unlock;
}
- if (mmap_read_lock_killable(mm)) {
- count = -EINTR;
- goto out_mm;
- }
tlb_gather_mmu(&tlb, mm, 0, -1);
if (type == CLEAR_REFS_SOFT_DIRTY) {
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!(vma->vm_flags & VM_SOFTDIRTY))
continue;
- mmap_read_unlock(mm);
- if (mmap_write_lock_killable(mm)) {
- count = -EINTR;
- goto out_mm;
- }
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- vma->vm_flags &= ~VM_SOFTDIRTY;
- vma_set_page_prot(vma);
- }
- mmap_write_downgrade(mm);
- break;
+ vma->vm_flags &= ~VM_SOFTDIRTY;
+ vma_set_page_prot(vma);
}
mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
@@ -1261,7 +1267,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
if (type == CLEAR_REFS_SOFT_DIRTY)
mmu_notifier_invalidate_range_end(&range);
tlb_finish_mmu(&tlb, 0, -1);
- mmap_read_unlock(mm);
+out_unlock:
+ mmap_write_unlock(mm);
out_mm:
mmput(mm);
}
diff --git a/fs/select.c b/fs/select.c
index ebfebdfe5c69..37aaa8317f3a 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -1011,14 +1011,17 @@ static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
fdcount = do_poll(head, &table, end_time);
poll_freewait(&table);
+ if (!user_write_access_begin(ufds, nfds * sizeof(*ufds)))
+ goto out_fds;
+
for (walk = head; walk; walk = walk->next) {
struct pollfd *fds = walk->entries;
int j;
- for (j = 0; j < walk->len; j++, ufds++)
- if (__put_user(fds[j].revents, &ufds->revents))
- goto out_fds;
+ for (j = walk->len; j; fds++, ufds++, j--)
+ unsafe_put_user(fds->revents, &ufds->revents, Efault);
}
+ user_write_access_end();
err = fdcount;
out_fds:
@@ -1030,6 +1033,11 @@ out_fds:
}
return err;
+
+Efault:
+ user_write_access_end();
+ err = -EFAULT;
+ goto out_fds;
}
static long do_restart_poll(struct restart_block *restart_block)
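
The do_sys_poll() change above switches from per-entry __put_user() calls to a single user_write_access_begin() section with unsafe_put_user() and a shared Efault label, so user access is enabled and disabled once around the whole loop. A sketch of that general pattern, detached from the poll code (uptr, a and b are placeholder names):

	if (!user_write_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;
	/* faults jump straight to the Efault label instead of returning */
	unsafe_put_user(a, &uptr->first, Efault);
	unsafe_put_user(b, &uptr->second, Efault);
	user_write_access_end();
	return 0;

Efault:
	user_write_access_end();
	return -EFAULT;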
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 5bef3a68395d..d0df217f4712 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -705,6 +705,7 @@ static int udf_check_vsd(struct super_block *sb)
struct buffer_head *bh = NULL;
int nsr = 0;
struct udf_sb_info *sbi;
+ loff_t session_offset;
sbi = UDF_SB(sb);
if (sb->s_blocksize < sizeof(struct volStructDesc))
@@ -712,7 +713,8 @@ static int udf_check_vsd(struct super_block *sb)
else
sectorsize = sb->s_blocksize;
- sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
+ session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits;
+ sector += session_offset;
udf_debug("Starting at sector %u (%lu byte sectors)\n",
(unsigned int)(sector >> sb->s_blocksize_bits),
@@ -757,8 +759,7 @@ static int udf_check_vsd(struct super_block *sb)
if (nsr > 0)
return 1;
- else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
- VSD_FIRST_SECTOR_OFFSET)
+ else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)
return -1;
else
return 0;
diff --git a/fs/zonefs/Kconfig b/fs/zonefs/Kconfig
index ef2697b78820..827278f937fe 100644
--- a/fs/zonefs/Kconfig
+++ b/fs/zonefs/Kconfig
@@ -3,6 +3,7 @@ config ZONEFS_FS
depends on BLOCK
depends on BLK_DEV_ZONED
select FS_IOMAP
+ select CRC32
help
zonefs is a simple file system which exposes zones of a zoned block
device (e.g. host-managed or host-aware SMR disk drives) as files.
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 4365b9aa3e3f..267f6dfb8960 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -34,6 +34,7 @@ mandatory-y += kmap_size.h
mandatory-y += kprobes.h
mandatory-y += linkage.h
mandatory-y += local.h
+mandatory-y += local64.h
mandatory-y += mm-arch-hooks.h
mandatory-y += mmiowb.h
mandatory-y += mmu.h
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index dd90c9792909..0e7316a86240 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -11,19 +11,19 @@
* See Documentation/atomic_bitops.txt for details.
*/
-static inline void set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
}
-static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
}
-static inline void change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
index 664e120b93e6..f3136750c490 100644
--- a/include/drm/drm_agpsupport.h
+++ b/include/drm/drm_agpsupport.h
@@ -28,10 +28,6 @@ struct drm_agp_head {
#if IS_ENABLED(CONFIG_AGP)
-void drm_free_agp(struct agp_memory * handle, int pages);
-int drm_bind_agp(struct agp_memory * handle, unsigned int start);
-int drm_unbind_agp(struct agp_memory * handle);
-
struct drm_agp_head *drm_agp_init(struct drm_device *dev);
void drm_legacy_agp_clear(struct drm_device *dev);
int drm_agp_acquire(struct drm_device *dev);
@@ -61,20 +57,6 @@ int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
#else /* CONFIG_AGP */
-static inline void drm_free_agp(struct agp_memory * handle, int pages)
-{
-}
-
-static inline int drm_bind_agp(struct agp_memory * handle, unsigned int start)
-{
- return -ENODEV;
-}
-
-static inline int drm_unbind_agp(struct agp_memory * handle)
-{
- return -ENODEV;
-}
-
static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
return NULL;
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 54e051a957df..ce7023e9115d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -248,6 +248,26 @@ struct drm_private_state_funcs {
* drm_dev_register()
* 2/ all calls to drm_atomic_private_obj_fini() must be done after calling
* drm_dev_unregister()
+ *
+ * If that private object is used to store a state shared by multiple
+ * CRTCs, proper care must be taken to ensure that non-blocking commits are
+ * properly ordered to avoid a use-after-free issue.
+ *
+ * Indeed, assuming a sequence of two non-blocking &drm_atomic_commit on two
+ * different &drm_crtc using different &drm_plane and &drm_connector, so with no
+ * resources shared, there's no guarantee on which commit is going to happen
+ * first. However, the second &drm_atomic_commit will consider the first
+ * &drm_private_obj its old state, and will be in charge of freeing it whenever
+ * the second &drm_atomic_commit is done.
+ *
+ * If the first &drm_atomic_commit happens after it, it will consider its
+ * &drm_private_obj the new state and will be likely to access it, resulting in
+ * an access to a freed memory region. Drivers should store (and get a reference
+ * to) the &drm_crtc_commit structure in their private state in
+ * &drm_mode_config_helper_funcs.atomic_commit_setup, and then wait for that
+ * commit to complete as the first step of
+ * &drm_mode_config_helper_funcs.atomic_commit_tail, similar to
+ * drm_atomic_helper_wait_for_dependencies().
*/
struct drm_private_obj {
/**
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 5f47720440fa..4045e2507e11 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -147,10 +147,6 @@ int drm_atomic_helper_page_flip_target(
uint32_t flags,
uint32_t target,
struct drm_modeset_acquire_ctx *ctx);
-int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue,
- uint32_t size,
- struct drm_modeset_acquire_ctx *ctx);
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index fcdc58d8b88b..1922b278ffad 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -175,6 +175,46 @@ struct drm_scdc {
struct drm_scrambling scrambling;
};
+/**
+ * struct drm_hdmi_dsc_cap - DSC capabilities of HDMI sink
+ *
+ * Describes the DSC support provided by an HDMI 2.1 sink.
+ * The information is fetched from additional HFVSDB blocks defined
+ * for HDMI 2.1.
+ */
+struct drm_hdmi_dsc_cap {
+ /** @v_1p2: flag for DSC 1.2 version support by sink */
+ bool v_1p2;
+
+ /** @native_420: Does sink support DSC with 4:2:0 compression */
+ bool native_420;
+
+ /**
+ * @all_bpp: Does sink support all bpp with 4:4:4 or 4:2:2
+ * compressed formats
+ */
+ bool all_bpp;
+
+ /**
+ * @bpc_supported: compressed bpc supported by sink: 10, 12 or 16 bpc
+ */
+ u8 bpc_supported;
+
+ /** @max_slices: maximum number of horizontal slices supported by the sink */
+ u8 max_slices;
+
+ /** @clk_per_slice: max pixel clock in MHz supported per slice */
+ int clk_per_slice;
+
+ /** @max_lanes: dsc max lanes supported for Fixed rate Link training */
+ u8 max_lanes;
+
+ /** @max_frl_rate_per_lane: maximum frl rate with DSC per lane */
+ u8 max_frl_rate_per_lane;
+
+ /** @total_chunk_kbytes: max size of chunks in KBs supported per line */
+ u8 total_chunk_kbytes;
+};
/**
* struct drm_hdmi_info - runtime information about the connected HDMI sink
@@ -207,6 +247,15 @@ struct drm_hdmi_info {
/** @y420_dc_modes: bitmap of deep color support index */
u8 y420_dc_modes;
+
+ /** @max_frl_rate_per_lane: maximum FRL rate supported per lane */
+ u8 max_frl_rate_per_lane;
+
+ /** @max_lanes: maximum number of FRL lanes supported by the sink */
+ u8 max_lanes;
+
+ /** @dsc_cap: DSC capabilities of the sink */
+ struct drm_hdmi_dsc_cap dsc_cap;
};
/**
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 5f43d64d2a07..13eeba2a750a 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -1223,6 +1223,39 @@ int drm_crtc_init_with_planes(struct drm_device *dev,
const char *name, ...);
void drm_crtc_cleanup(struct drm_crtc *crtc);
+__printf(7, 8)
+void *__drmm_crtc_alloc_with_planes(struct drm_device *dev,
+ size_t size, size_t offset,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...);
+
+/**
+ * drmm_crtc_alloc_with_planes - Allocate and initialize a new CRTC object with
+ * specified primary and cursor planes.
+ * @dev: DRM device
+ * @type: the type of the struct which contains struct &drm_crtc
+ * @member: the name of the &drm_crtc within @type.
+ * @primary: Primary plane for CRTC
+ * @cursor: Cursor plane for CRTC
+ * @funcs: callbacks for the new CRTC
+ * @name: printf style format string for the CRTC name, or NULL for default name
+ *
+ * Allocates and initializes a new crtc object. Cleanup is automatically
+ * handled through registering drmm_crtc_cleanup() with drmm_add_action().
+ *
+ * The @drm_crtc_funcs.destroy hook must be NULL.
+ *
+ * Returns:
+ * Pointer to new crtc, or ERR_PTR on failure.
+ */
+#define drmm_crtc_alloc_with_planes(dev, type, member, primary, cursor, funcs, name, ...) \
+ ((type *)__drmm_crtc_alloc_with_planes(dev, sizeof(type), \
+ offsetof(type, member), \
+ primary, cursor, funcs, \
+ name, ##__VA_ARGS__))
+
/**
* drm_crtc_index - find the index of a registered CRTC
* @crtc: CRTC to find index for
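
For orientation, the new drmm_crtc_alloc_with_planes() macro is used like the other drmm_*_alloc() helpers: the driver embeds struct drm_crtc in its own structure and lets the managed allocation register the cleanup. A minimal sketch with a hypothetical driver (struct foo_crtc, foo_crtc_funcs and the plane arguments are assumptions, not from the patch):

static const struct drm_crtc_funcs foo_crtc_funcs = {
	/* .destroy must be left NULL for managed CRTCs */
};

struct foo_crtc {
	struct drm_crtc base;
	/* driver private state */
};

static int foo_create_crtc(struct drm_device *dev,
			   struct drm_plane *primary,
			   struct drm_plane *cursor)
{
	struct foo_crtc *fc;

	/* allocation + drm_crtc_init_with_planes() + managed cleanup */
	fc = drmm_crtc_alloc_with_planes(dev, struct foo_crtc, base,
					 primary, cursor, &foo_crtc_funcs,
					 NULL);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	/* no drm_crtc_cleanup() and no .destroy hook needed */
	return 0;
}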
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 283a93ce4617..d647223e8390 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -51,13 +51,6 @@ enum switch_power_state {
* may contain multiple heads.
*/
struct drm_device {
- /**
- * @legacy_dev_list:
- *
- * List of devices per driver for stealth attach cleanup
- */
- struct list_head legacy_dev_list;
-
/** @if_version: Highest interface version set */
int if_version;
@@ -83,11 +76,7 @@ struct drm_device {
} managed;
/** @driver: DRM driver managing the device */
-#ifdef CONFIG_DRM_LEGACY
- struct drm_driver *driver;
-#else
const struct drm_driver *driver;
-#endif
/**
* @dev_private:
@@ -293,10 +282,6 @@ struct drm_device {
/** @pdev: PCI device structure */
struct pci_dev *pdev;
-#ifdef __alpha__
- /** @hose: PCI hose, only used on ALPHA platforms. */
- struct pci_controller *hose;
-#endif
/** @num_crtcs: Number of CRTCs on this device */
unsigned int num_crtcs;
@@ -336,6 +321,14 @@ struct drm_device {
/* Everything below here is for legacy driver, never use! */
/* private: */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
+ /* List of devices per driver for stealth attach cleanup */
+ struct list_head legacy_dev_list;
+
+#ifdef __alpha__
+ /** @hose: PCI hose, only used on ALPHA platforms. */
+ struct pci_controller *hose;
+#endif
+
/* Context handle management - linked list of context handles */
struct list_head ctxlist;
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 6b40258927bf..edffd1dcca3e 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -411,6 +411,17 @@ struct drm_device;
# define DP_DS_10BPC 1
# define DP_DS_12BPC 2
# define DP_DS_16BPC 3
+/* HDMI2.1 PCON FRL CONFIGURATION */
+# define DP_PCON_MAX_FRL_BW (7 << 2)
+# define DP_PCON_MAX_0GBPS (0 << 2)
+# define DP_PCON_MAX_9GBPS (1 << 2)
+# define DP_PCON_MAX_18GBPS (2 << 2)
+# define DP_PCON_MAX_24GBPS (3 << 2)
+# define DP_PCON_MAX_32GBPS (4 << 2)
+# define DP_PCON_MAX_40GBPS (5 << 2)
+# define DP_PCON_MAX_48GBPS (6 << 2)
+# define DP_PCON_SOURCE_CTL_MODE (1 << 5)
+
/* offset 3 for DVI */
# define DP_DS_DVI_DUAL_LINK (1 << 1)
# define DP_DS_DVI_HIGH_COLOR_DEPTH (1 << 2)
@@ -421,6 +432,17 @@ struct drm_device;
# define DP_DS_HDMI_YCBCR444_TO_422_CONV (1 << 3)
# define DP_DS_HDMI_YCBCR444_TO_420_CONV (1 << 4)
+/*
+ * VESA DP-to-HDMI PCON Specification adds caps for colorspace
+ * conversion in DFP cap DPCD 83h. Sec6.1 Table-3.
+ * Based on the available support the source can enable
+ * color conversion by writing into PROTOCOL_CONVERTER_CONTROL_2
+ * DPCD 3052h.
+ */
+# define DP_DS_HDMI_BT601_RGB_YCBCR_CONV (1 << 5)
+# define DP_DS_HDMI_BT709_RGB_YCBCR_CONV (1 << 6)
+# define DP_DS_HDMI_BT2020_RGB_YCBCR_CONV (1 << 7)
+
#define DP_MAX_DOWNSTREAM_PORTS 0x10
/* DP Forward error Correction Registers */
@@ -430,6 +452,84 @@ struct drm_device;
# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2)
# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3)
+/* DP-HDMI2.1 PCON DSC ENCODER SUPPORT */
+#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xC /* 0x9E - 0x92 */
+#define DP_PCON_DSC_ENCODER 0x092
+# define DP_PCON_DSC_ENCODER_SUPPORTED (1 << 0)
+# define DP_PCON_DSC_PPS_ENC_OVERRIDE (1 << 1)
+
+/* DP-HDMI2.1 PCON DSC Version */
+#define DP_PCON_DSC_VERSION 0x093
+# define DP_PCON_DSC_MAJOR_MASK (0xF << 0)
+# define DP_PCON_DSC_MINOR_MASK (0xF << 4)
+# define DP_PCON_DSC_MAJOR_SHIFT 0
+# define DP_PCON_DSC_MINOR_SHIFT 4
+
+/* DP-HDMI2.1 PCON DSC RC Buffer block size */
+#define DP_PCON_DSC_RC_BUF_BLK_INFO 0x094
+# define DP_PCON_DSC_RC_BUF_BLK_SIZE (0x3 << 0)
+# define DP_PCON_DSC_RC_BUF_BLK_1KB 0
+# define DP_PCON_DSC_RC_BUF_BLK_4KB 1
+# define DP_PCON_DSC_RC_BUF_BLK_16KB 2
+# define DP_PCON_DSC_RC_BUF_BLK_64KB 3
+
+/* DP-HDMI2.1 PCON DSC RC Buffer size */
+#define DP_PCON_DSC_RC_BUF_SIZE 0x095
+
+/* DP-HDMI2.1 PCON DSC Slice capabilities-1 */
+#define DP_PCON_DSC_SLICE_CAP_1 0x096
+# define DP_PCON_DSC_1_PER_DSC_ENC (0x1 << 0)
+# define DP_PCON_DSC_2_PER_DSC_ENC (0x1 << 1)
+# define DP_PCON_DSC_4_PER_DSC_ENC (0x1 << 3)
+# define DP_PCON_DSC_6_PER_DSC_ENC (0x1 << 4)
+# define DP_PCON_DSC_8_PER_DSC_ENC (0x1 << 5)
+# define DP_PCON_DSC_10_PER_DSC_ENC (0x1 << 6)
+# define DP_PCON_DSC_12_PER_DSC_ENC (0x1 << 7)
+
+#define DP_PCON_DSC_BUF_BIT_DEPTH 0x097
+# define DP_PCON_DSC_BIT_DEPTH_MASK (0xF << 0)
+# define DP_PCON_DSC_DEPTH_9_BITS 0
+# define DP_PCON_DSC_DEPTH_10_BITS 1
+# define DP_PCON_DSC_DEPTH_11_BITS 2
+# define DP_PCON_DSC_DEPTH_12_BITS 3
+# define DP_PCON_DSC_DEPTH_13_BITS 4
+# define DP_PCON_DSC_DEPTH_14_BITS 5
+# define DP_PCON_DSC_DEPTH_15_BITS 6
+# define DP_PCON_DSC_DEPTH_16_BITS 7
+# define DP_PCON_DSC_DEPTH_8_BITS 8
+
+#define DP_PCON_DSC_BLOCK_PREDICTION 0x098
+# define DP_PCON_DSC_BLOCK_PRED_SUPPORT (0x1 << 0)
+
+#define DP_PCON_DSC_ENC_COLOR_FMT_CAP 0x099
+# define DP_PCON_DSC_ENC_RGB (0x1 << 0)
+# define DP_PCON_DSC_ENC_YUV444 (0x1 << 1)
+# define DP_PCON_DSC_ENC_YUV422_S (0x1 << 2)
+# define DP_PCON_DSC_ENC_YUV422_N (0x1 << 3)
+# define DP_PCON_DSC_ENC_YUV420_N (0x1 << 4)
+
+#define DP_PCON_DSC_ENC_COLOR_DEPTH_CAP 0x09A
+# define DP_PCON_DSC_ENC_8BPC (0x1 << 1)
+# define DP_PCON_DSC_ENC_10BPC (0x1 << 2)
+# define DP_PCON_DSC_ENC_12BPC (0x1 << 3)
+
+#define DP_PCON_DSC_MAX_SLICE_WIDTH 0x09B
+
+/* DP-HDMI2.1 PCON DSC Slice capabilities-2 */
+#define DP_PCON_DSC_SLICE_CAP_2 0x09C
+# define DP_PCON_DSC_16_PER_DSC_ENC (0x1 << 0)
+# define DP_PCON_DSC_20_PER_DSC_ENC (0x1 << 1)
+# define DP_PCON_DSC_24_PER_DSC_ENC (0x1 << 2)
+
+/* DP-HDMI2.1 PCON HDMI TX Encoder Bits/pixel increment */
+#define DP_PCON_DSC_BPP_INCR 0x09E
+# define DP_PCON_DSC_BPP_INCR_MASK (0x7 << 0)
+# define DP_PCON_DSC_ONE_16TH_BPP 0
+# define DP_PCON_DSC_ONE_8TH_BPP 1
+# define DP_PCON_DSC_ONE_4TH_BPP 2
+# define DP_PCON_DSC_ONE_HALF_BPP 3
+# define DP_PCON_DSC_ONE_BPP 4
+
/* DP Extended DSC Capabilities */
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
@@ -935,6 +1035,11 @@ struct drm_device;
# define DP_CEC_IRQ (1 << 2)
#define DP_LINK_SERVICE_IRQ_VECTOR_ESI0 0x2005 /* 1.2 */
+# define RX_CAP_CHANGED (1 << 0)
+# define LINK_STATUS_CHANGED (1 << 1)
+# define STREAM_STATUS_CHANGED (1 << 2)
+# define HDMI_LINK_STATUS_CHANGED (1 << 3)
+# define CONNECTED_OFF_ENTRY_REQUESTED (1 << 4)
#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
# define DP_PSR_LINK_CRC_ERROR (1 << 0)
@@ -1054,6 +1159,51 @@ struct drm_device;
#define DP_CEC_TX_MESSAGE_BUFFER 0x3020
#define DP_CEC_MESSAGE_BUFFER_LENGTH 0x10
+/* PCON CONFIGURE-1 FRL FOR HDMI SINK */
+#define DP_PCON_HDMI_LINK_CONFIG_1 0x305A
+# define DP_PCON_ENABLE_MAX_FRL_BW (7 << 0)
+# define DP_PCON_ENABLE_MAX_BW_0GBPS 0
+# define DP_PCON_ENABLE_MAX_BW_9GBPS 1
+# define DP_PCON_ENABLE_MAX_BW_18GBPS 2
+# define DP_PCON_ENABLE_MAX_BW_24GBPS 3
+# define DP_PCON_ENABLE_MAX_BW_32GBPS 4
+# define DP_PCON_ENABLE_MAX_BW_40GBPS 5
+# define DP_PCON_ENABLE_MAX_BW_48GBPS 6
+# define DP_PCON_ENABLE_SOURCE_CTL_MODE (1 << 3)
+# define DP_PCON_ENABLE_CONCURRENT_LINK (1 << 4)
+# define DP_PCON_ENABLE_LINK_FRL_MODE (1 << 5)
+# define DP_PCON_ENABLE_HPD_READY (1 << 6)
+# define DP_PCON_ENABLE_HDMI_LINK (1 << 7)
+
+/* PCON CONFIGURE-2 FRL FOR HDMI SINK */
+#define DP_PCON_HDMI_LINK_CONFIG_2 0x305B
+# define DP_PCON_MAX_LINK_BW_MASK (0x3F << 0)
+# define DP_PCON_FRL_BW_MASK_9GBPS (1 << 0)
+# define DP_PCON_FRL_BW_MASK_18GBPS (1 << 1)
+# define DP_PCON_FRL_BW_MASK_24GBPS (1 << 2)
+# define DP_PCON_FRL_BW_MASK_32GBPS (1 << 3)
+# define DP_PCON_FRL_BW_MASK_40GBPS (1 << 4)
+# define DP_PCON_FRL_BW_MASK_48GBPS (1 << 5)
+# define DP_PCON_FRL_LINK_TRAIN_EXTENDED (1 << 6)
+
+/* PCON HDMI LINK STATUS */
+#define DP_PCON_HDMI_TX_LINK_STATUS 0x303B
+# define DP_PCON_HDMI_TX_LINK_ACTIVE (1 << 0)
+# define DP_PCON_FRL_READY (1 << 1)
+
+/* PCON HDMI POST FRL STATUS */
+#define DP_PCON_HDMI_POST_FRL_STATUS 0x3036
+# define DP_PCON_HDMI_LINK_MODE (1 << 0)
+# define DP_PCON_HDMI_MODE_TMDS 0
+# define DP_PCON_HDMI_MODE_FRL 1
+# define DP_PCON_HDMI_FRL_TRAINED_BW (0x3F << 1)
+# define DP_PCON_FRL_TRAINED_BW_9GBPS (1 << 1)
+# define DP_PCON_FRL_TRAINED_BW_18GBPS (1 << 2)
+# define DP_PCON_FRL_TRAINED_BW_24GBPS (1 << 3)
+# define DP_PCON_FRL_TRAINED_BW_32GBPS (1 << 4)
+# define DP_PCON_FRL_TRAINED_BW_40GBPS (1 << 5)
+# define DP_PCON_FRL_TRAINED_BW_48GBPS (1 << 6)
+
#define DP_PROTOCOL_CONVERTER_CONTROL_0 0x3050 /* DP 1.3 */
# define DP_HDMI_DVI_OUTPUT_CONFIG (1 << 0) /* DP 1.3 */
#define DP_PROTOCOL_CONVERTER_CONTROL_1 0x3051 /* DP 1.3 */
@@ -1063,6 +1213,48 @@ struct drm_device;
# define DP_HDMI_FORCE_SCRAMBLING (1 << 3) /* DP 1.4 */
#define DP_PROTOCOL_CONVERTER_CONTROL_2 0x3052 /* DP 1.3 */
# define DP_CONVERSION_TO_YCBCR422_ENABLE (1 << 0) /* DP 1.3 */
+# define DP_PCON_ENABLE_DSC_ENCODER (1 << 1)
+# define DP_PCON_ENCODER_PPS_OVERRIDE_MASK (0x3 << 2)
+# define DP_PCON_ENC_PPS_OVERRIDE_DISABLED 0
+# define DP_PCON_ENC_PPS_OVERRIDE_EN_PARAMS 1
+# define DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER 2
+# define DP_CONVERSION_RGB_YCBCR_MASK (7 << 4)
+# define DP_CONVERSION_BT601_RGB_YCBCR_ENABLE (1 << 4)
+# define DP_CONVERSION_BT709_RGB_YCBCR_ENABLE (1 << 5)
+# define DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE (1 << 6)
+
+/* PCON Downstream HDMI ERROR Status per Lane */
+#define DP_PCON_HDMI_ERROR_STATUS_LN0 0x3037
+#define DP_PCON_HDMI_ERROR_STATUS_LN1 0x3038
+#define DP_PCON_HDMI_ERROR_STATUS_LN2 0x3039
+#define DP_PCON_HDMI_ERROR_STATUS_LN3 0x303A
+# define DP_PCON_HDMI_ERROR_COUNT_MASK (0x7 << 0)
+# define DP_PCON_HDMI_ERROR_COUNT_THREE_PLUS (1 << 0)
+# define DP_PCON_HDMI_ERROR_COUNT_TEN_PLUS (1 << 1)
+# define DP_PCON_HDMI_ERROR_COUNT_HUNDRED_PLUS (1 << 2)
+
+/* PCON HDMI CONFIG PPS Override Buffer
+ * Valid Offsets to be added to Base : 0-127
+ */
+#define DP_PCON_HDMI_PPS_OVERRIDE_BASE 0x3100
+
+/* PCON HDMI CONFIG PPS Override Parameter: Slice height
+ * Offset-0 8LSBs of the Slice height.
+ * Offset-1 8MSBs of the Slice height.
+ */
+#define DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT 0x3180
+
+/* PCON HDMI CONFIG PPS Override Parameter: Slice width
+ * Offset-0 8LSBs of the Slice width.
+ * Offset-1 8MSBs of the Slice width.
+ */
+#define DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH 0x3182
+
+/* PCON HDMI CONFIG PPS Override Parameter: bits_per_pixel
+ * Offset-0 8LSBs of the bits_per_pixel.
+ * Offset-1 2MSBs of the bits_per_pixel.
+ */
+#define DP_PCON_HDMI_PPS_OVRD_BPP 0x3184
/* HDCP 1.3 and HDCP 2.2 */
#define DP_AUX_HDCP_BKSV 0x68000
@@ -1837,16 +2029,13 @@ struct drm_dp_desc {
int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
bool is_branch);
-u32 drm_dp_get_edid_quirks(const struct edid *edid);
/**
* enum drm_dp_quirk - Display Port sink/branch device specific quirks
*
* Display Port sink and branch devices in the wild have a variety of bugs, try
* to collect them here. The quirks are shared, but it's up to the drivers to
- * implement workarounds for them. Note that because some devices have
- * unreliable OUIDs, the EDID of sinks should also be checked for quirks using
- * drm_dp_get_edid_quirks().
+ * implement workarounds for them.
*/
enum drm_dp_quirk {
/**
@@ -1879,16 +2068,6 @@ enum drm_dp_quirk {
*/
DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
/**
- * @DP_QUIRK_FORCE_DPCD_BACKLIGHT:
- *
- * The device is telling the truth when it says that it uses DPCD
- * backlight controls, even if the system's firmware disagrees. This
- * quirk should be checked against both the ident and panel EDID.
- * When present, the driver should honor the DPCD backlight
- * capabilities advertised.
- */
- DP_QUIRK_FORCE_DPCD_BACKLIGHT,
- /**
* @DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS:
*
* The device supports a link rate of 3.24 Gbps (multiplier 0xc) despite
@@ -1900,16 +2079,14 @@ enum drm_dp_quirk {
/**
* drm_dp_has_quirk() - does the DP device have a specific quirk
* @desc: Device descriptor filled by drm_dp_read_desc()
- * @edid_quirks: Optional quirk bitmask filled by drm_dp_get_edid_quirks()
* @quirk: Quirk to query for
*
* Return true if DP device identified by @desc has @quirk.
*/
static inline bool
-drm_dp_has_quirk(const struct drm_dp_desc *desc, u32 edid_quirks,
- enum drm_dp_quirk quirk)
+drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
{
- return (desc->quirks | edid_quirks) & BIT(quirk);
+ return desc->quirks & BIT(quirk);
}
#ifdef CONFIG_DRM_DP_CEC
@@ -1967,4 +2144,30 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
struct drm_dp_phy_test_params *data);
int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
struct drm_dp_phy_test_params *data, u8 dp_rev);
+int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd);
+bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux);
+int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
+ bool concurrent_mode);
+int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
+ bool extended_train_mode);
+int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux);
+int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux);
+
+bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux);
+int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask);
+void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
+ struct drm_connector *connector);
+bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_pps_default(struct drm_dp_aux *aux);
+int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128]);
+int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6]);
+bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4], u8 color_spc);
+int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc);
+
#endif /* _DRM_DP_HELPER_H_ */
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index f5e92fe9151c..bd1c39907b92 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -783,6 +783,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count);
int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 02787319246a..827838e0a97e 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -499,8 +499,6 @@ struct drm_driver {
/* Everything below here is for legacy driver, never use! */
/* private: */
- /* List of devices hanging off this driver with stealth attach. */
- struct list_head legacy_dev_list;
int (*firstopen) (struct drm_device *);
void (*preclose) (struct drm_device *, struct drm_file *file_priv);
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
diff --git a/include/drm/drm_dsc.h b/include/drm/drm_dsc.h
index 53c51231b31c..cf43561e60fa 100644
--- a/include/drm/drm_dsc.h
+++ b/include/drm/drm_dsc.h
@@ -603,6 +603,7 @@ struct drm_dsc_pps_infoframe {
} __packed;
void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header);
+int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size);
void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp,
const struct drm_dsc_config *dsc_cfg);
int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index e97daf6ffbb1..a158f585f658 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -229,6 +229,36 @@ struct detailed_timing {
DRM_EDID_YCBCR420_DC_36 | \
DRM_EDID_YCBCR420_DC_30)
+/* HDMI 2.1 additional fields */
+#define DRM_EDID_MAX_FRL_RATE_MASK 0xf0
+#define DRM_EDID_FAPA_START_LOCATION (1 << 0)
+#define DRM_EDID_ALLM (1 << 1)
+#define DRM_EDID_FVA (1 << 2)
+
+/* Deep Color specific */
+#define DRM_EDID_DC_30BIT_420 (1 << 0)
+#define DRM_EDID_DC_36BIT_420 (1 << 1)
+#define DRM_EDID_DC_48BIT_420 (1 << 2)
+
+/* VRR specific */
+#define DRM_EDID_CNMVRR (1 << 3)
+#define DRM_EDID_CINEMA_VRR (1 << 4)
+#define DRM_EDID_MDELTA (1 << 5)
+#define DRM_EDID_VRR_MAX_UPPER_MASK 0xc0
+#define DRM_EDID_VRR_MAX_LOWER_MASK 0xff
+#define DRM_EDID_VRR_MIN_MASK 0x3f
+
+/* DSC specific */
+#define DRM_EDID_DSC_10BPC (1 << 0)
+#define DRM_EDID_DSC_12BPC (1 << 1)
+#define DRM_EDID_DSC_16BPC (1 << 2)
+#define DRM_EDID_DSC_ALL_BPP (1 << 3)
+#define DRM_EDID_DSC_NATIVE_420 (1 << 6)
+#define DRM_EDID_DSC_1P2 (1 << 7)
+#define DRM_EDID_DSC_MAX_FRL_RATE_MASK 0xf0
+#define DRM_EDID_DSC_MAX_SLICES 0xf
+#define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f
+
/* ELD Header Block */
#define DRM_ELD_HEADER_BLOCK_SIZE 4
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 5dfa5f7a80a7..5bf78b5bcb2b 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -89,7 +89,7 @@ struct drm_encoder_funcs {
* @head: list management
* @base: base KMS object
* @name: human readable name, can be overwritten by the driver
- * @funcs: control functions
+ * @funcs: control functions, can be NULL for simple managed encoders
* @helper_private: mid-layer private data
*
* CRTCs drive pixels to encoders, which convert them into signals
@@ -194,6 +194,36 @@ int drm_encoder_init(struct drm_device *dev,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...);
+__printf(6, 7)
+void *__drmm_encoder_alloc(struct drm_device *dev,
+ size_t size, size_t offset,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type,
+ const char *name, ...);
+
+/**
+ * drmm_encoder_alloc - Allocate and initialize an encoder
+ * @dev: drm device
+ * @type: the type of the struct which contains struct &drm_encoder
+ * @member: the name of the &drm_encoder within @type
+ * @funcs: callbacks for this encoder (optional)
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * Allocates and initializes an encoder. Encoder should be subclassed as part of
+ * driver encoder objects. Cleanup is automatically handled through registering
+ * drm_encoder_cleanup() with drmm_add_action().
+ *
+ * The @drm_encoder_funcs.destroy hook must be NULL.
+ *
+ * Returns:
+ * Pointer to new encoder, or ERR_PTR on failure.
+ */
+#define drmm_encoder_alloc(dev, type, member, funcs, encoder_type, name, ...) \
+ ((type *)__drmm_encoder_alloc(dev, sizeof(type), \
+ offsetof(type, member), funcs, \
+ encoder_type, name, ##__VA_ARGS__))
+
/**
* drm_encoder_index - find the index of a registered encoder
* @encoder: encoder to find index for
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 5e6daa1c982f..240049566592 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -416,8 +416,5 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
bool write);
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
u32 handle, u64 *offset);
-int drm_gem_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle);
#endif /* __DRM_GEM_H__ */
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 5605c1b8f779..0a9711caa3e8 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -59,7 +59,7 @@ struct drm_gem_cma_object {
.poll = drm_poll,\
.read = drm_read,\
.llseek = noop_llseek,\
- .mmap = drm_gem_cma_mmap,\
+ .mmap = drm_gem_mmap,\
DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
}
@@ -76,9 +76,6 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args);
-/* set vm_flags and we can change the VM attribute to other one at here */
-int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
-
/* allocate physical memory */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
size_t size);
@@ -96,14 +93,13 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj);
-struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj);
+struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
-int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
+int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
+int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
/**
* DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations
@@ -123,7 +119,7 @@ int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
.prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
.prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, \
- .gem_prime_mmap = drm_gem_cma_prime_mmap
+ .gem_prime_mmap = drm_gem_prime_mmap
/**
* DRM_GEM_CMA_DRIVER_OPS - CMA GEM driver operations
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index fe58dbb46962..ac22c246542a 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -101,11 +101,11 @@
/* Following Macros take a byte at a time for bit(s) masking */
/*
- * TODO: This has to be changed for DP MST, as multiple stream on
- * same port is possible.
- * For HDCP2.2 on HDMI and DP SST this value is always 1.
+ * TODO: HDCP_2_2_MAX_CONTENT_STREAMS_CNT is based upon actual
+ * H/W MST streams capacity.
+ * This needs to be moved out to a platform-specific header.
*/
-#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT 1
+#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT 4
#define HDCP_2_2_TXCAP_MASK_LEN 2
#define HDCP_2_2_RXCAPS_LEN 3
#define HDCP_2_2_RX_REPEATER(x) ((x) & BIT(0))
diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h
index d77f6e65b1c6..631b22f9757d 100644
--- a/include/drm/drm_irq.h
+++ b/include/drm/drm_irq.h
@@ -28,5 +28,5 @@ struct drm_device;
int drm_irq_install(struct drm_device *dev, int irq);
int drm_irq_uninstall(struct drm_device *dev);
-
+int devm_drm_irq_install(struct drm_device *dev, int irq);
#endif
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index 852d7451eeb1..8ed04e9be997 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -198,8 +198,10 @@ struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align);
void drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah);
-int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
-void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+int drm_legacy_pci_init(const struct drm_driver *driver,
+ struct pci_driver *pdriver);
+void drm_legacy_pci_exit(const struct drm_driver *driver,
+ struct pci_driver *pdriver);
#else
@@ -214,13 +216,13 @@ static inline void drm_pci_free(struct drm_device *dev,
{
}
-static inline int drm_legacy_pci_init(struct drm_driver *driver,
+static inline int drm_legacy_pci_init(const struct drm_driver *driver,
struct pci_driver *pdriver)
{
return -EINVAL;
}
-static inline void drm_legacy_pci_exit(struct drm_driver *driver,
+static inline void drm_legacy_pci_exit(const struct drm_driver *driver,
struct pci_driver *pdriver)
{
}
diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h
index ca4114633bf9..b45c6fbf53ac 100644
--- a/include/drm/drm_managed.h
+++ b/include/drm/drm_managed.h
@@ -44,8 +44,6 @@ int __must_check __drmm_add_action_or_reset(struct drm_device *dev,
drmres_release_t action,
void *data, const char *name);
-void drmm_add_final_kfree(struct drm_device *dev, void *container);
-
void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) __malloc;
/**
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index c2827ceaba0d..f543d6e3e822 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -172,7 +172,7 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
* mipi_dbi_command - MIPI DCS command with optional parameter(s)
* @dbi: MIPI DBI structure
* @cmd: Command
- * @seq...: Optional parameter(s)
+ * @seq: Optional parameter(s)
*
* Send MIPI DCS command to the controller. Use mipi_dbi_command_read() for
* get/read.
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index a0d79d1c51e2..29ba4adf0c53 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -461,9 +461,19 @@ void drm_display_mode_from_videomode(const struct videomode *vm,
void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
struct videomode *vm);
void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags);
+
+#if defined(CONFIG_OF)
int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode, u32 *bus_flags,
int index);
+#else
+static inline int of_get_drm_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode,
+ u32 *bus_flags, int index)
+{
+ return -EINVAL;
+}
+#endif
void drm_mode_set_name(struct drm_display_mode *mode);
int drm_mode_vrefresh(const struct drm_display_mode *mode);
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 16ff3fa148f5..eb706342861d 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -866,13 +866,19 @@ struct drm_connector_helper_funcs {
* The usual way to implement this is to cache the EDID retrieved in the
* probe callback somewhere in the driver-private connector structure.
* In this function drivers then parse the modes in the EDID and add
- * them by calling drm_add_edid_modes(). But connectors that driver a
+ * them by calling drm_add_edid_modes(). But connectors that drive a
* fixed panel can also manually add specific modes using
* drm_mode_probed_add(). Drivers which manually add modes should also
* make sure that the &drm_connector.display_info,
* &drm_connector.width_mm and &drm_connector.height_mm fields are
* filled in.
*
+ * Note that the caller function will automatically add standard VESA
+ * DMT modes up to 1024x768 if the .get_modes() helper operation returns
+ * no mode and if the connector status is connector_status_connected or
+ * connector_status_unknown. There is no need to call
+ * drm_add_modes_noedid() manually in that case.
+ *
* Virtual drivers that just want some standard VESA mode with a given
* resolution can call drm_add_modes_noedid(), and mark the preferred
* one using drm_set_preferred_mode().
@@ -1395,6 +1401,27 @@ struct drm_mode_config_helper_funcs {
* drm_atomic_helper_commit_tail().
*/
void (*atomic_commit_tail)(struct drm_atomic_state *state);
+
+ /**
+ * @atomic_commit_setup:
+ *
+ * This hook is used by the default atomic_commit() hook implemented in
+ * drm_atomic_helper_commit() together with the nonblocking helpers (see
+ * drm_atomic_helper_setup_commit()) to extend the DRM commit setup. It
+ * is not used by the atomic helpers.
+ *
+ * This function is called at the end of
+ * drm_atomic_helper_setup_commit(), so once the commit has been
+ * properly set up across the generic DRM object states. It allows
+ * drivers to do some additional commit tracking that isn't related to a
+ * CRTC, plane or connector, tracked in a &drm_private_obj structure.
+ *
+ * Note that the documentation of &drm_private_obj has more details on
+ * how one should implement this.
+ *
+ * This hook is optional.
+ */
+ int (*atomic_commit_setup)(struct drm_atomic_state *state);
};
#endif
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 1d82b264e5e4..8ef06ee1c8eb 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -764,6 +764,48 @@ int drm_plane_init(struct drm_device *dev,
bool is_primary);
void drm_plane_cleanup(struct drm_plane *plane);
+__printf(10, 11)
+void *__drmm_universal_plane_alloc(struct drm_device *dev,
+ size_t size, size_t offset,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type plane_type,
+ const char *name, ...);
+
+/**
+ * drmm_universal_plane_alloc - Allocate and initialize a universal plane object
+ * @dev: DRM device
+ * @type: the type of the struct which contains struct &drm_plane
+ * @member: the name of the &drm_plane within @type
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @format_modifiers: array of struct drm_format modifiers terminated by
+ * DRM_FORMAT_MOD_INVALID
+ * @plane_type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Allocates and initializes a plane object of type @type. Cleanup is
+ * automatically handled through registering drm_plane_cleanup() with
+ * drmm_add_action().
+ *
+ * The @drm_plane_funcs.destroy hook must be NULL.
+ *
+ * Returns:
+ * Pointer to new plane, or ERR_PTR on failure.
+ */
+#define drmm_universal_plane_alloc(dev, type, member, possible_crtcs, funcs, formats, \
+ format_count, format_modifiers, plane_type, name, ...) \
+ ((type *)__drmm_universal_plane_alloc(dev, sizeof(type), \
+ offsetof(type, member), \
+ possible_crtcs, funcs, formats, \
+ format_count, format_modifiers, \
+ plane_type, name, ##__VA_ARGS__))
+
/**
* drm_plane_index - find the index of a registered plane
* @plane: plane to find index for
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 0991a47a1567..54f2c58305d2 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -105,8 +105,9 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
-int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
- dma_addr_t *addrs, int max_pages);
-
+int drm_prime_sg_to_page_array(struct sg_table *sgt, struct page **pages,
+ int max_pages);
+int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
+ int max_pages);
#endif /* __DRM_PRIME_H__ */
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 4a0a80d658c7..bbf5c1fdd7b0 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -114,7 +114,7 @@ struct drm_property {
* by the property. Bitmask properties are created using
* drm_property_create_bitmask().
*
- * DRM_MODE_PROB_OBJECT
+ * DRM_MODE_PROP_OBJECT
* Object properties are used to link modeset objects. This is used
* extensively in the atomic support to create the display pipeline,
* by linking &drm_framebuffer to &drm_plane, &drm_plane to
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index e7f4d24cdd00..39f2deee709c 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -206,6 +206,19 @@ static inline bool drm_rect_equals(const struct drm_rect *r1,
r1->y1 == r2->y1 && r1->y2 == r2->y2;
}
+/**
+ * drm_rect_fp_to_int - Convert a rect in 16.16 fixed point form to int form.
+ * @dst: rect to be stored the converted value
+ * @src: rect in 16.16 fixed point form
+ */
+static inline void drm_rect_fp_to_int(struct drm_rect *dst,
+ const struct drm_rect *src)
+{
+ drm_rect_init(dst, src->x1 >> 16, src->y1 >> 16,
+ drm_rect_width(src) >> 16,
+ drm_rect_height(src) >> 16);
+}
+
bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip);
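
The drm_rect_fp_to_int() helper added above converts a rectangle from the 16.16 fixed-point form used for plane source coordinates into whole pixels. A small usage sketch (the values are arbitrary; DRM_RECT_INIT() is the existing x/y/width/height initializer):

	/* 16.16 fixed-point source rect: x=100, y=50, 1920x1080 */
	struct drm_rect src = DRM_RECT_INIT(100 << 16, 50 << 16,
					    1920 << 16, 1080 << 16);
	struct drm_rect dst;

	drm_rect_fp_to_int(&dst, &src);
	/* dst is now { .x1 = 100, .y1 = 50, .x2 = 2020, .y2 = 1130 } */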
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index a026375464ff..e6dbf3161c2f 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -185,4 +185,28 @@ int drm_simple_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
int encoder_type);
+void *__drmm_simple_encoder_alloc(struct drm_device *dev, size_t size,
+ size_t offset, int encoder_type);
+
+/**
+ * drmm_simple_encoder_alloc - Allocate and initialize an encoder with basic
+ * functionality.
+ * @dev: drm device
+ * @type: the type of the struct which contains struct &drm_encoder
+ * @member: the name of the &drm_encoder within @type.
+ * @encoder_type: user visible type of the encoder
+ *
+ * Allocates and initializes an encoder that has no further functionality.
+ * Settings for possible CRTC and clones are left to their initial values.
+ * Cleanup is automatically handled through registering drm_encoder_cleanup()
+ * with drmm_add_action().
+ *
+ * Returns:
+ * Pointer to new encoder, or ERR_PTR on failure.
+ */
+#define drmm_simple_encoder_alloc(dev, type, member, encoder_type) \
+ ((type *)__drmm_simple_encoder_alloc(dev, sizeof(type), \
+ offsetof(type, member), \
+ encoder_type))
+
#endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 92436553fd6a..975e8a67947f 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -171,10 +171,10 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* struct drm_sched_job - A job to be run by an entity.
*
* @queue_node: used to append this struct to the queue of jobs in an entity.
+ * @list: a job participates in the "pending" and "done" lists.
* @sched: the scheduler instance on which this job is scheduled.
* @s_fence: contains the fences for the scheduling of job.
* @finish_cb: the callback for the finished fence.
- * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
* @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
* limit of the scheduler then the job is marked guilty and will not
@@ -189,21 +189,21 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
*/
struct drm_sched_job {
struct spsc_node queue_node;
+ struct list_head list;
struct drm_gpu_scheduler *sched;
struct drm_sched_fence *s_fence;
struct dma_fence_cb finish_cb;
- struct list_head node;
uint64_t id;
atomic_t karma;
enum drm_sched_priority s_priority;
- struct drm_sched_entity *entity;
+ struct drm_sched_entity *entity;
struct dma_fence_cb cb;
};
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
int threshold)
{
- return (s_job && atomic_inc_return(&s_job->karma) > threshold);
+ return s_job && atomic_inc_return(&s_job->karma) > threshold;
}
/**
@@ -260,8 +260,8 @@ struct drm_sched_backend_ops {
* @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
* timeout interval is over.
* @thread: the kthread on which the scheduler runs.
- * @ring_mirror_list: the list of jobs which are currently in the job queue.
- * @job_list_lock: lock to protect the ring_mirror_list.
+ * @pending_list: the list of jobs which are currently in the job queue.
+ * @job_list_lock: lock to protect the pending_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
* guilty and it will be considered for scheduling further.
* @score: score to help loadbalancer pick an idle sched
@@ -282,7 +282,7 @@ struct drm_gpu_scheduler {
atomic64_t job_id_count;
struct delayed_work work_tdr;
struct task_struct *thread;
- struct list_head ring_mirror_list;
+ struct list_head pending_list;
spinlock_t job_list_lock;
int hang_limit;
atomic_t score;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 2564e66e67d7..e17be324d95f 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -125,7 +125,6 @@ struct ttm_buffer_object {
struct ttm_bo_device *bdev;
enum ttm_bo_type type;
void (*destroy) (struct ttm_buffer_object *);
- unsigned long num_pages;
size_t acc_size;
/**
@@ -310,6 +309,7 @@ void ttm_bo_put(struct ttm_buffer_object *bo);
* ttm_bo_move_to_lru_tail
*
* @bo: The buffer object.
+ * @mem: Resource object.
* @bulk: optional bulk move structure to remember BO positions
*
* Move this BO to the tail of all lru lists used to lookup and reserve an
@@ -317,6 +317,7 @@ void ttm_bo_put(struct ttm_buffer_object *bo);
* held, and is used to make a BO less likely to be considered for eviction.
*/
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
+ struct ttm_resource *mem,
struct ttm_lru_bulk_move *bulk);
/**
@@ -397,13 +398,11 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo,
- unsigned long size,
- enum ttm_bo_type type,
+ size_t size, enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
struct ttm_operation_ctx *ctx,
- size_t acc_size,
- struct sg_table *sg,
+ size_t acc_size, struct sg_table *sg,
struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *));
@@ -445,7 +444,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/
int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
- unsigned long size, enum ttm_bo_type type,
+ size_t size, enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment, bool interruptible, size_t acc_size,
struct sg_table *sg, struct dma_resv *resv,
@@ -600,6 +599,7 @@ static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
{
dma_resv_assert_held(bo->base.resv);
+ WARN_ON_ONCE(!kref_read(&bo->kref));
++bo->pin_count;
}
@@ -613,6 +613,7 @@ static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
{
dma_resv_assert_held(bo->base.resv);
WARN_ON_ONCE(!bo->pin_count);
+ WARN_ON_ONCE(!kref_read(&bo->kref));
--bo->pin_count;
}
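A hedged usage sketch, illustrative only: both helpers must run with the BO's
reservation lock held, and the added WARN_ON_ONCE() now also fires if the object's
last reference has already been dropped.

dma_resv_lock(bo->base.resv, NULL);	/* error handling omitted in this sketch */
ttm_bo_pin(bo);				/* buffer may no longer be moved or evicted */
dma_resv_unlock(bo->base.resv);

/* ... later, once the buffer no longer needs to stay resident ... */
dma_resv_lock(bo->base.resv, NULL);
ttm_bo_unpin(bo);
dma_resv_unlock(bo->base.resv);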
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index f02f7cf9ae90..423348414c59 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -39,7 +39,6 @@
#include "ttm_bo_api.h"
#include "ttm_memory.h"
-#include "ttm_module.h"
#include "ttm_placement.h"
#include "ttm_tt.h"
#include "ttm_pool.h"
@@ -492,10 +491,11 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
return 0;
}
-static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
+static inline void
+ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
spin_lock(&ttm_bo_glob.lru_lock);
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
spin_unlock(&ttm_bo_glob.lru_lock);
}
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index f48a70d39ac5..da0ed7e8c915 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -171,7 +171,6 @@ struct ttm_bus_placement {
struct ttm_resource {
void *mm_node;
unsigned long start;
- unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
@@ -191,6 +190,10 @@ struct ttm_resource {
static inline void
ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used)
{
+ int i;
+
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; i++)
+ WARN_ON(!list_empty(&man->lru[i]));
man->use_type = used;
}
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index da27e9d8fa64..6c8eb9a4de81 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -99,8 +99,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
*/
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
uint32_t page_flags, enum ttm_caching caching);
-int ttm_dma_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
- uint32_t page_flags, enum ttm_caching caching);
int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
uint32_t page_flags, enum ttm_caching caching);
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index fc85f50fa0e9..8dcb3e1477bc 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -13,7 +13,7 @@
#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
#define ARMV8_PMU_MAX_COUNTER_PAIRS ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
-#ifdef CONFIG_KVM_ARM_PMU
+#ifdef CONFIG_HW_PERF_EVENTS
struct kvm_pmc {
u8 idx; /* index into the pmu->pmc array */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 2630c2e953f7..053bf05fb1f7 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -885,6 +885,13 @@ static inline int acpi_device_modalias(struct device *dev,
return -ENODEV;
}
+static inline struct platform_device *
+acpi_create_platform_device(struct acpi_device *adev,
+ struct property_entry *properties)
+{
+ return NULL;
+}
+
static inline bool acpi_dma_supported(struct acpi_device *adev)
{
return false;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 47b021952ac7..d705b174d346 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -447,8 +447,8 @@ enum {
BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
/* allocate from reserved pool */
BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
- /* set RQF_PREEMPT */
- BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3),
+ /* set RQF_PM */
+ BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2),
};
struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
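A hedged sketch of the intended use: power-management paths now pass the renamed
flag when allocating, so the request is still processed while the queue is pm-only.

/* Illustrative only: allocate a request allowed to run during runtime PM. */
struct request *rq;

rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
if (IS_ERR(rq))
	return PTR_ERR(rq);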
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 070de09425ad..f94ee3089e01 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -79,9 +79,6 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
-/* set for "ide_preempt" requests and also for requests for which the SCSI
- "quiesce" state must be ignored. */
-#define RQF_PREEMPT ((__force req_flags_t)(1 << 8))
/* vaguely specified driver internal error. Ignored by the block layer */
#define RQF_FAILED ((__force req_flags_t)(1 << 10))
/* don't warn about errors */
@@ -430,8 +427,7 @@ struct request_queue {
unsigned long queue_flags;
/*
* Number of contexts that have called blk_set_pm_only(). If this
- * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
- * processed.
+ * counter is above zero then only RQF_PM requests are processed.
*/
atomic_t pm_only;
@@ -696,6 +692,18 @@ static inline bool queue_is_mq(struct request_queue *q)
return q->mq_ops;
}
+#ifdef CONFIG_PM
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
+{
+ return q->rpm_status;
+}
+#else
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
+{
+ return RPM_ACTIVE;
+}
+#endif
+
static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
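A minimal hedged sketch of the new accessor, assuming a driver-side check before
issuing ordinary (non-PM) work:

/* Illustrative only: back off while the queue's device is runtime-suspended. */
if (queue_rpm_status(q) == RPM_SUSPENDED)
	return -EAGAIN;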
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
index 7bb66e15b481..e3a0be2c90ad 100644
--- a/include/linux/build_bug.h
+++ b/include/linux/build_bug.h
@@ -77,9 +77,4 @@
#define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
#define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
-#ifdef __GENKSYMS__
-/* genksyms gets confused by _Static_assert */
-#define _Static_assert(expr, ...)
-#endif
-
#endif /* _LINUX_BUILD_BUG_H */
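static_assert() itself is unchanged by this hunk; for reference, a small hedged
example of its use at declaration scope (struct name hypothetical):

struct on_disk_header {			/* hypothetical layout */
	__le32 magic;
	__le32 version;
};
static_assert(sizeof(struct on_disk_header) == 8, "on-disk header must stay 8 bytes");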
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index f5e02f6c0655..3989dcb94d3d 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -33,8 +33,8 @@
#define CEPH_MSGR2_INCARNATION_1 (0ull)
#define DEFINE_MSGR2_FEATURE(bit, incarnation, name) \
- static const uint64_t CEPH_MSGR2_FEATURE_##name = (1ULL << bit); \
- static const uint64_t CEPH_MSGR2_FEATUREMASK_##name = \
+ static const uint64_t __maybe_unused CEPH_MSGR2_FEATURE_##name = (1ULL << bit); \
+ static const uint64_t __maybe_unused CEPH_MSGR2_FEATUREMASK_##name = \
(1ULL << bit | CEPH_MSGR2_INCARNATION_##incarnation);
#define HAVE_MSGR2_FEATURE(x, name) \
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 74c6c0486eed..555ab0fddbef 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -13,6 +13,12 @@
/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */
#if GCC_VERSION < 40900
# error Sorry, your version of GCC is too old - please use 4.9 or newer.
+#elif defined(CONFIG_ARM64) && GCC_VERSION < 50100
+/*
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293
+ * https://lore.kernel.org/r/[email protected]
+ */
+# error Sorry, your version of GCC is too old - please use 5.1 or newer.
#endif
/*
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index b2a3f4f641a7..ea5e04e75845 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -273,6 +273,12 @@
#define __used __attribute__((__used__))
/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warn_005funused_005fresult-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#nodiscard-warn-unused-result
+ */
+#define __must_check __attribute__((__warn_unused_result__))
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute
*/
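Together with the CONFIG_ENABLE_MUST_CHECK removal in compiler_types.h below, the
annotation is now unconditional; a hedged sketch of its effect (names hypothetical):

__must_check int my_reserve_resource(struct my_ctx *ctx);	/* hypothetical API */

static void caller(struct my_ctx *ctx)
{
	my_reserve_resource(ctx);	/* now always warns: ignored return value */
}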
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index bbaa39e98f9f..e5dd5a4ae946 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -121,12 +121,6 @@ struct ftrace_likely_data {
unsigned long constant;
};
-#ifdef CONFIG_ENABLE_MUST_CHECK
-#define __must_check __attribute__((__warn_unused_result__))
-#else
-#define __must_check
-#endif
-
#if defined(CC_USING_HOTPATCH)
#define notrace __attribute__((hotpatch(0, 0)))
#elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY)
diff --git a/include/linux/console.h b/include/linux/console.h
index dbe78e8e2602..20874db50bc8 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -186,12 +186,9 @@ extern int braille_register_console(struct console *, int index,
extern int braille_unregister_console(struct console *);
#ifdef CONFIG_TTY
extern void console_sysfs_notify(void);
-extern void register_ttynull_console(void);
#else
static inline void console_sysfs_notify(void)
{ }
-static inline void register_ttynull_console(void)
-{ }
#endif
extern bool console_suspend_enabled;
diff --git a/include/linux/device.h b/include/linux/device.h
index 89bb8b84173e..1779f90eeb4c 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -609,6 +609,18 @@ static inline const char *dev_name(const struct device *dev)
return kobject_name(&dev->kobj);
}
+/**
+ * dev_bus_name - Return a device's bus/class name, if at all possible
+ * @dev: struct device to get the bus/class name of
+ *
+ * Will return the name of the bus/class the device is attached to. If it is
+ * not attached to a bus/class, an empty string will be returned.
+ */
+static inline const char *dev_bus_name(const struct device *dev)
+{
+ return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
+}
+
__printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);
#ifdef CONFIG_NUMA
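A hedged usage sketch: the helper is safe even for devices registered without a bus
or a class, so it can be used directly in diagnostics.

/* Illustrative only. */
pr_info("probing %s on %s\n", dev_name(dev), dev_bus_name(dev));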
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
index 29d255fdd5d6..90bd558a17f5 100644
--- a/include/linux/dm-bufio.h
+++ b/include/linux/dm-bufio.h
@@ -150,6 +150,7 @@ void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
sector_t dm_bufio_get_block_number(struct dm_buffer *b);
void *dm_bufio_get_block_data(struct dm_buffer *b);
void *dm_bufio_get_aux_data(struct dm_buffer *b);
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index cf72699cb2bc..efdc56b9d95f 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -85,14 +85,16 @@ struct dma_buf_ops {
/**
* @pin:
*
- * This is called by dma_buf_pin and lets the exporter know that the
- * DMA-buf can't be moved any more.
+ * This is called by dma_buf_pin() and lets the exporter know that the
+ * DMA-buf can't be moved any more. The exporter should pin the buffer
+ * into system memory to make sure it is generally accessible by other
+ * devices.
*
- * This is called with the dmabuf->resv object locked and is mutual
+ * This is called with the &dmabuf.resv object locked and is mutual
* exclusive with @cache_sgt_mapping.
*
- * This callback is optional and should only be used in limited use
- * cases like scanout and not for temporary pin operations.
+ * This is called automatically for non-dynamic importers from
+ * dma_buf_attach().
*
* Returns:
*
@@ -103,7 +105,7 @@ struct dma_buf_ops {
/**
* @unpin:
*
- * This is called by dma_buf_unpin and lets the exporter know that the
+ * This is called by dma_buf_unpin() and lets the exporter know that the
* DMA-buf can be moved again.
*
* This is called with the dmabuf->resv object locked and is mutual
@@ -152,6 +154,12 @@ struct dma_buf_ops {
* On failure, returns a negative error value wrapped into a pointer.
* May also return -EINTR when a signal was received while being
* blocked.
+ *
+ * Note that exporters should not try to cache the scatter list, or
+ * return the same one for multiple calls. Caching is done either by the
+ * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
+ * of the scatter list is transferred to the caller, and returned by
+ * @unmap_dma_buf.
*/
struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
enum dma_data_direction);
@@ -183,24 +191,19 @@ struct dma_buf_ops {
* @begin_cpu_access:
*
* This is called from dma_buf_begin_cpu_access() and allows the
- * exporter to ensure that the memory is actually available for cpu
- * access - the exporter might need to allocate or swap-in and pin the
- * backing storage. The exporter also needs to ensure that cpu access is
- * coherent for the access direction. The direction can be used by the
- * exporter to optimize the cache flushing, i.e. access with a different
+ * exporter to ensure that the memory is actually coherent for cpu
+ * access. The exporter also needs to ensure that cpu access is coherent
+ * for the access direction. The direction can be used by the exporter
+ * to optimize the cache flushing, i.e. access with a different
* direction (read instead of write) might return stale or even bogus
* data (e.g. when the exporter needs to copy the data to temporary
* storage).
*
- * This callback is optional.
+ * Note that this is both called through the DMA_BUF_IOCTL_SYNC IOCTL
+ * command for userspace mappings established through @mmap, and also
+ * for kernel mappings established with @vmap.
*
- * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
- * from userspace (where storage shouldn't be pinned to avoid handing
- * de-factor mlock rights to userspace) and for the kernel-internal
- * users of the various kmap interfaces, where the backing storage must
- * be pinned to guarantee that the atomic kmap calls can succeed. Since
- * there's no in-kernel users of the kmap interfaces yet this isn't a
- * real problem.
+ * This callback is optional.
*
* Returns:
*
@@ -216,9 +219,7 @@ struct dma_buf_ops {
*
* This is called from dma_buf_end_cpu_access() when the importer is
* done accessing the CPU. The exporter can use this to flush caches and
- * unpin any resources pinned in @begin_cpu_access.
- * The result of any dma_buf kmap calls after end_cpu_access is
- * undefined.
+ * undo anything else done in @begin_cpu_access.
*
* This callback is optional.
*
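For context, a hedged importer-side sketch of the pin interface these comments
describe; the surrounding variables are assumed, and locking follows the
&dmabuf.resv rule noted above.

/* Illustrative only: pin for a long-lived use such as scanout. */
dma_resv_lock(attach->dmabuf->resv, NULL);
ret = dma_buf_pin(attach);
dma_resv_unlock(attach->dmabuf->resv);
if (ret)
	return ret;

/* ... the exporter may no longer move the buffer ... */

dma_resv_lock(attach->dmabuf->resv, NULL);
dma_buf_unpin(attach);
dma_resv_unlock(attach->dmabuf->resv);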
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d956987ed032..09c6a0bf3892 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -533,11 +533,10 @@ struct dmar_domain {
/* Domain ids per IOMMU. Use u16 since
* domain ids are 16 bit wide according
* to VT-d spec, section 9.3 */
- unsigned int auxd_refcnt; /* Refcount of auxiliary attaching */
bool has_iotlb_device;
struct list_head devices; /* all devices' list */
- struct list_head auxd; /* link to device's auxiliary list */
+ struct list_head subdevices; /* all subdevices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
struct dma_pte *pgd; /* virtual address */
@@ -610,14 +609,21 @@ struct intel_iommu {
struct dmar_drhd_unit *drhd;
};
+/* Per subdevice private data */
+struct subdev_domain_info {
+ struct list_head link_phys; /* link to phys device siblings */
+ struct list_head link_domain; /* link to domain siblings */
+ struct device *pdev; /* physical device derived from */
+ struct dmar_domain *domain; /* aux-domain */
+ int users; /* user count */
+};
+
/* PCI domain-device relationship */
struct device_domain_info {
struct list_head link; /* link to domain siblings */
struct list_head global; /* link to global list */
struct list_head table; /* link to pasid table */
- struct list_head auxiliary_domains; /* auxiliary domains
- * attached to this device
- */
+ struct list_head subdevices; /* subdevices sibling */
u32 segment; /* PCI segment number */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
@@ -758,6 +764,7 @@ struct intel_svm_dev {
struct list_head list;
struct rcu_head rcu;
struct device *dev;
+ struct intel_iommu *iommu;
struct svm_dev_ops *ops;
struct iommu_sva sva;
u32 pasid;
@@ -771,7 +778,6 @@ struct intel_svm {
struct mmu_notifier notifier;
struct mm_struct *mm;
- struct intel_iommu *iommu;
unsigned int flags;
u32 pasid;
int gpasid; /* In case that guest PASID is different from host PASID */
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5e0655fb2a6f..fe1ae73ff8b5 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -35,8 +35,12 @@ struct kunit_kasan_expectation {
#define KASAN_SHADOW_INIT 0
#endif
+#ifndef PTE_HWTABLE_PTRS
+#define PTE_HWTABLE_PTRS 0
+#endif
+
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
-extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
+extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index a10e84707d82..4e3037dc1204 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -52,6 +52,25 @@ static inline void kcov_remote_start_usb(u64 id)
kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, id));
}
+/*
+ * The softirq flavor of kcov_remote_*() functions is introduced as a temporary
+ * workaround for kcov's lack of nested remote coverage sections support in
+ * task context. Adding support for nested sections is tracked in:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=210337
+ */
+
+static inline void kcov_remote_start_usb_softirq(u64 id)
+{
+ if (in_serving_softirq())
+ kcov_remote_start_usb(id);
+}
+
+static inline void kcov_remote_stop_softirq(void)
+{
+ if (in_serving_softirq())
+ kcov_remote_stop();
+}
+
#else
static inline void kcov_task_init(struct task_struct *t) {}
@@ -66,6 +85,8 @@ static inline u64 kcov_common_handle(void)
}
static inline void kcov_remote_start_common(u64 id) {}
static inline void kcov_remote_start_usb(u64 id) {}
+static inline void kcov_remote_start_usb_softirq(u64 id) {}
+static inline void kcov_remote_stop_softirq(void) {}
#endif /* CONFIG_KCOV */
#endif /* _LINUX_KCOV_H */
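A hedged sketch of the intended call sites, modelled on USB completion handling that
may run in softirq context (handler name hypothetical):

static void my_urb_complete(struct urb *urb)
{
	/* Coverage is only annotated when actually serving a softirq. */
	kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
	/* ... process the completion ... */
	kcov_remote_stop_softirq();
}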
diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h
index 85b5151911cf..4856706fbfeb 100644
--- a/include/linux/kdev_t.h
+++ b/include/linux/kdev_t.h
@@ -21,61 +21,61 @@
})
/* acceptable for old filesystems */
-static inline bool old_valid_dev(dev_t dev)
+static __always_inline bool old_valid_dev(dev_t dev)
{
return MAJOR(dev) < 256 && MINOR(dev) < 256;
}
-static inline u16 old_encode_dev(dev_t dev)
+static __always_inline u16 old_encode_dev(dev_t dev)
{
return (MAJOR(dev) << 8) | MINOR(dev);
}
-static inline dev_t old_decode_dev(u16 val)
+static __always_inline dev_t old_decode_dev(u16 val)
{
return MKDEV((val >> 8) & 255, val & 255);
}
-static inline u32 new_encode_dev(dev_t dev)
+static __always_inline u32 new_encode_dev(dev_t dev)
{
unsigned major = MAJOR(dev);
unsigned minor = MINOR(dev);
return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}
-static inline dev_t new_decode_dev(u32 dev)
+static __always_inline dev_t new_decode_dev(u32 dev)
{
unsigned major = (dev & 0xfff00) >> 8;
unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
return MKDEV(major, minor);
}
-static inline u64 huge_encode_dev(dev_t dev)
+static __always_inline u64 huge_encode_dev(dev_t dev)
{
return new_encode_dev(dev);
}
-static inline dev_t huge_decode_dev(u64 dev)
+static __always_inline dev_t huge_decode_dev(u64 dev)
{
return new_decode_dev(dev);
}
-static inline int sysv_valid_dev(dev_t dev)
+static __always_inline int sysv_valid_dev(dev_t dev)
{
return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18);
}
-static inline u32 sysv_encode_dev(dev_t dev)
+static __always_inline u32 sysv_encode_dev(dev_t dev)
{
return MINOR(dev) | (MAJOR(dev) << 18);
}
-static inline unsigned sysv_major(u32 dev)
+static __always_inline unsigned sysv_major(u32 dev)
{
return (dev >> 18) & 0x3fff;
}
-static inline unsigned sysv_minor(u32 dev)
+static __always_inline unsigned sysv_minor(u32 dev)
{
return dev & 0x3ffff;
}
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 65b81e0c494d..2484ed97e72f 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -33,6 +33,9 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
unsigned int cpu,
const char *namefmt);
+void kthread_set_per_cpu(struct task_struct *k, int cpu);
+bool kthread_is_per_cpu(struct task_struct *k);
+
/**
* kthread_run - create and wake a thread.
* @threadfn: the function to run until signal_pending(current).
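A hedged sketch of the two declarations added above (surrounding variables assumed):

/* Illustrative only: mark a freshly created kthread as bound to 'cpu'. */
kthread_set_per_cpu(task, cpu);

if (kthread_is_per_cpu(task))
	pr_debug("%s is a per-CPU kthread\n", task->comm);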
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index a12b5523cc18..73f20deb497d 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -230,6 +230,5 @@ static inline ktime_t ms_to_ktime(u64 ms)
}
# include <linux/timekeeping.h>
-# include <linux/timekeeping32.h>
#endif
diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h
index 5d71e8a8500f..aca4dc037b70 100644
--- a/include/linux/mdio-bitbang.h
+++ b/include/linux/mdio-bitbang.h
@@ -35,6 +35,9 @@ struct mdiobb_ctrl {
const struct mdiobb_ops *ops;
};
+int mdiobb_read(struct mii_bus *bus, int phy, int reg);
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val);
+
/* The returned bus is not yet registered with the phy layer. */
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl);
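A hedged sketch of why these accessors are now exported: a driver can reuse them
inside its own mii_bus callbacks instead of relying on alloc_mdio_bitbang() alone
(wrapper name hypothetical):

static int my_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	/* hypothetical: e.g. wake the MAC before bit-banging the MDIO lines */
	return mdiobb_read(bus, phy, reg);
}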
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index d827bd7f3bfe..eeb0b52203e9 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -665,7 +665,7 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
{
struct mem_cgroup *memcg = page_memcg(page);
- VM_WARN_ON_ONCE_PAGE(!memcg, page);
+ VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
return mem_cgroup_lruvec(memcg, pgdat);
}
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 8fbddec26eb8..442c0160caab 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1280,7 +1280,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 ece_support[0x1];
u8 reserved_at_a4[0x7];
u8 log_max_srq[0x5];
- u8 reserved_at_b0[0x2];
+ u8 reserved_at_b0[0x1];
+ u8 uplink_follow[0x1];
u8 ts_cqe_to_dest_cqn[0x1];
u8 reserved_at_b3[0xd];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5299b90a6c40..ecdf8a8cd6ae 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -216,6 +216,13 @@ int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);
int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);
+/*
+ * Any attempt to mark this function as static leads to build failure
+ * when CONFIG_DEBUG_INFO_BTF is enabled because __add_to_page_cache_locked()
+ * is referred to by BPF code. This must be visible for error injection.
+ */
+int __add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+ pgoff_t index, gfp_t gfp, void **shadowp);
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
@@ -2432,8 +2439,9 @@ extern int __meminit early_pfn_to_nid(unsigned long pfn);
#endif
extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
- enum meminit_context, struct vmem_altmap *, int migratetype);
+extern void memmap_init_zone(unsigned long, int, unsigned long,
+ unsigned long, unsigned long, enum meminit_context,
+ struct vmem_altmap *, int migratetype);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index d92535997687..bfed36e342cc 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -116,6 +116,9 @@ enum {
NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer
* Location
*/
+ NVME_REG_CMBMSC = 0x0050, /* Controller Memory Buffer Memory
+ * Space Control
+ */
NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
@@ -135,6 +138,7 @@ enum {
#define NVME_CAP_CSS(cap) (((cap) >> 37) & 0xff)
#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define NVME_CAP_CMBS(cap) (((cap) >> 57) & 0x1)
#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
@@ -192,6 +196,8 @@ enum {
NVME_CSTS_SHST_OCCUR = 1 << 2,
NVME_CSTS_SHST_CMPLT = 2 << 2,
NVME_CSTS_SHST_MASK = 3 << 2,
+ NVME_CMBMSC_CRE = 1 << 0,
+ NVME_CMBMSC_CMSE = 1 << 1,
};
struct nvme_id_power_state {
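A hedged sketch of how a controller driver might consume the new definitions (the
'bar' mapping is assumed; a real enable sequence also programs the CMB base address):

u64 cap = readq(bar + NVME_REG_CAP);

if (NVME_CAP_CMBS(cap))
	/* Illustrative only: first enable the CMB register space (CRE). */
	writeq(NVME_CMBMSC_CRE, bar + NVME_REG_CMBMSC);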
diff --git a/include/linux/pci.h b/include/linux/pci.h
index b32126d26997..53f4904ee83d 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1232,6 +1232,15 @@ void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
void pci_release_resource(struct pci_dev *dev, int resno);
+static inline int pci_rebar_bytes_to_size(u64 bytes)
+{
+ bytes = roundup_pow_of_two(bytes);
+
+ /* Return BAR size as defined in the resizable BAR specification */
+ return max(ilog2(bytes), 20) - 20;
+}
+
+u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
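As a worked example of pci_rebar_bytes_to_size(): a 1 MiB request rounds to 2^20
bytes and maps to size 0, while a 33 MiB request rounds up to 64 MiB (2^26) and maps
to size 6, matching the resizable BAR capability encoding.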
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index bf7966776c55..505480217cf1 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -163,8 +163,6 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif
-bool arm_pmu_irq_is_nmi(void);
-
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index de0826411311..fd02c5fa60cb 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -86,6 +86,12 @@ void rcu_sched_clock_irq(int user);
void rcu_report_dead(unsigned int cpu);
void rcutree_migrate_callbacks(int cpu);
+#ifdef CONFIG_TASKS_RCU_GENERIC
+void rcu_init_tasks_generic(void);
+#else
+static inline void rcu_init_tasks_generic(void) { }
+#endif
+
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
diff --git a/include/linux/sizes.h b/include/linux/sizes.h
index 9874f6f67537..1ac79bcee2bb 100644
--- a/include/linux/sizes.h
+++ b/include/linux/sizes.h
@@ -44,6 +44,9 @@
#define SZ_2G 0x80000000
#define SZ_4G _AC(0x100000000, ULL)
+#define SZ_8G _AC(0x200000000, ULL)
+#define SZ_16G _AC(0x400000000, ULL)
+#define SZ_32G _AC(0x800000000, ULL)
#define SZ_64T _AC(0x400000000000, ULL)
#endif /* __LINUX_SIZES_H__ */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 333bcdc39635..5f60c9e907c9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -366,7 +366,7 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
- if (PageHighMem(p))
+ if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
return true;
#endif
return false;
@@ -1203,6 +1203,7 @@ struct skb_seq_state {
struct sk_buff *root_skb;
struct sk_buff *cur_skb;
__u8 *frag_data;
+ __u32 frag_off;
};
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index f3929aff39cf..7688bc983de5 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -251,6 +251,30 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#endif /* __SYSCALL_DEFINEx */
+/* For split 64-bit arguments on 32-bit architectures */
+#ifdef __LITTLE_ENDIAN
+#define SC_ARG64(name) u32, name##_lo, u32, name##_hi
+#else
+#define SC_ARG64(name) u32, name##_hi, u32, name##_lo
+#endif
+#define SC_VAL64(type, name) ((type) name##_hi << 32 | name##_lo)
+
+#ifdef CONFIG_COMPAT
+#define SYSCALL32_DEFINE1 COMPAT_SYSCALL_DEFINE1
+#define SYSCALL32_DEFINE2 COMPAT_SYSCALL_DEFINE2
+#define SYSCALL32_DEFINE3 COMPAT_SYSCALL_DEFINE3
+#define SYSCALL32_DEFINE4 COMPAT_SYSCALL_DEFINE4
+#define SYSCALL32_DEFINE5 COMPAT_SYSCALL_DEFINE5
+#define SYSCALL32_DEFINE6 COMPAT_SYSCALL_DEFINE6
+#else
+#define SYSCALL32_DEFINE1 SYSCALL_DEFINE1
+#define SYSCALL32_DEFINE2 SYSCALL_DEFINE2
+#define SYSCALL32_DEFINE3 SYSCALL_DEFINE3
+#define SYSCALL32_DEFINE4 SYSCALL_DEFINE4
+#define SYSCALL32_DEFINE5 SYSCALL_DEFINE5
+#define SYSCALL32_DEFINE6 SYSCALL_DEFINE6
+#endif
+
/*
* Called before coming back to user-mode. Returning to user-mode with an
* address limit different than USER_DS can allow to overwrite kernel memory.
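A hedged sketch of the new helpers (syscall and helper names hypothetical): on 32-bit
ABIs a 64-bit argument arrives split across two registers and is reassembled with
SC_VAL64(), while kernels without CONFIG_COMPAT fall back to the native
SYSCALL_DEFINEx variants.

SYSCALL32_DEFINE4(my_seek, unsigned int, fd, SC_ARG64(offset),
		  unsigned int, whence)
{
	return do_my_seek(fd, SC_VAL64(loff_t, offset), whence);
}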
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
deleted file mode 100644
index 266017fc9ee9..000000000000
--- a/include/linux/timekeeping32.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _LINUX_TIMEKEEPING32_H
-#define _LINUX_TIMEKEEPING32_H
-/*
- * These interfaces are all based on the old timespec type
- * and should get replaced with the timespec64 based versions
- * over time so we can remove the file here.
- */
-
-static inline unsigned long get_seconds(void)
-{
- return ktime_get_real_seconds();
-}
-
-#endif
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 88a7673894d5..cfbfd6fe01df 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -81,6 +81,8 @@ struct usbnet {
# define EVENT_LINK_CHANGE 11
# define EVENT_SET_RX_MODE 12
# define EVENT_NO_IP_ALIGN 13
+ u32 rx_speed; /* in bps - NOT Mbps */
+ u32 tx_speed; /* in bps - NOT Mbps */
};
static inline struct usb_driver *driver_of(struct usb_interface *intf)
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 977caf96c8d2..fc6dfeba04a5 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -121,9 +121,9 @@ extern struct pci_dev *vga_default_device(void);
extern void vga_set_default_device(struct pci_dev *pdev);
extern int vga_remove_vgacon(struct pci_dev *pdev);
#else
-static inline struct pci_dev *vga_default_device(void) { return NULL; };
-static inline void vga_set_default_device(struct pci_dev *pdev) { };
-static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; };
+static inline struct pci_dev *vga_default_device(void) { return NULL; }
+static inline void vga_set_default_device(struct pci_dev *pdev) { }
+static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; }
#endif
/*
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9a4bbccddc7f..0d6f7ec86061 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1756,7 +1756,7 @@ struct cfg80211_sar_specs {
/**
- * @struct cfg80211_sar_chan_ranges - sar frequency ranges
+ * struct cfg80211_sar_freq_ranges - sar frequency ranges
* @start_freq: start range edge frequency
* @end_freq: end range edge frequency
*/
@@ -3972,6 +3972,8 @@ struct mgmt_frame_regs {
* This callback may sleep.
* @reset_tid_config: Reset TID specific configuration for the peer, for the
* given TIDs. This callback may sleep.
+ *
+ * @set_sar_specs: Update the SAR (TX power) settings.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -4929,6 +4931,7 @@ struct wiphy_iftype_akm_suites {
* @max_data_retry_count: maximum supported per TID retry count for
* configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and
* %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
+ * @sar_capa: SAR control capabilities
*/
struct wiphy {
/* assign these fields before you register the wiphy */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 7338b3865a2a..111d7771b208 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -76,6 +76,8 @@ struct inet_connection_sock_af_ops {
* @icsk_ext_hdr_len: Network protocol overhead (IP/IPv6 options)
* @icsk_ack: Delayed ACK control data
* @icsk_mtup: MTU probing control data
+ * @icsk_probes_tstamp: Probe timestamp (cleared by non-zero window ack)
+ * @icsk_user_timeout: TCP_USER_TIMEOUT value
*/
struct inet_connection_sock {
/* inet_sock has to be the first member! */
@@ -129,6 +131,7 @@ struct inet_connection_sock {
u32 probe_timestamp;
} icsk_mtup;
+ u32 icsk_probes_tstamp;
u32 icsk_user_timeout;
u64 icsk_ca_priv[104 / sizeof(u64)];
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index d315740581f1..2bdbf62f4ecd 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -3880,6 +3880,7 @@ enum ieee80211_reconfig_type {
* This callback may sleep.
* @sta_set_4addr: Called to notify the driver when a station starts/stops using
* 4-address mode
+ * @set_sar_specs: Update the SAR (TX power) settings.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
diff --git a/include/net/red.h b/include/net/red.h
index fc455445f4b2..932f0d79d60c 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -168,12 +168,14 @@ static inline void red_set_vars(struct red_vars *v)
v->qcount = -1;
}
-static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
+static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log)
{
if (fls(qth_min) + Wlog > 32)
return false;
if (fls(qth_max) + Wlog > 32)
return false;
+ if (Scell_log >= 32)
+ return false;
if (qth_max < qth_min)
return false;
return true;
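A hedged sketch of the caller side ('ctl' assumed to point at the netlink-provided
struct tc_red_qopt): the extra parameter lets a qdisc reject an out-of-range cell
log before touching its RED state.

if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
	return -EINVAL;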
diff --git a/include/net/sock.h b/include/net/sock.h
index bdc4323ce53c..129d200bccb4 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1921,10 +1921,13 @@ static inline void sk_set_txhash(struct sock *sk)
sk->sk_txhash = net_tx_rndhash();
}
-static inline void sk_rethink_txhash(struct sock *sk)
+static inline bool sk_rethink_txhash(struct sock *sk)
{
- if (sk->sk_txhash)
+ if (sk->sk_txhash) {
sk_set_txhash(sk);
+ return true;
+ }
+ return false;
}
static inline struct dst_entry *
@@ -1947,12 +1950,10 @@ sk_dst_get(struct sock *sk)
return dst;
}
-static inline void dst_negative_advice(struct sock *sk)
+static inline void __dst_negative_advice(struct sock *sk)
{
struct dst_entry *ndst, *dst = __sk_dst_get(sk);
- sk_rethink_txhash(sk);
-
if (dst && dst->ops->negative_advice) {
ndst = dst->ops->negative_advice(dst);
@@ -1964,6 +1965,12 @@ static inline void dst_negative_advice(struct sock *sk)
}
}
+static inline void dst_negative_advice(struct sock *sk)
+{
+ sk_rethink_txhash(sk);
+ __dst_negative_advice(sk);
+}
+
static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 4f4e93bf814c..cc17bc957548 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -58,10 +58,6 @@ struct xdp_sock {
struct xsk_queue *tx ____cacheline_aligned_in_smp;
struct list_head tx_list;
- /* Mutual exclusion of NAPI TX thread and sendmsg error paths
- * in the SKB destructor callback.
- */
- spinlock_t tx_completion_lock;
/* Protects generic receive. */
spinlock_t rx_lock;
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 01755b838c74..eaa8386dbc63 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -73,6 +73,11 @@ struct xsk_buff_pool {
bool dma_need_sync;
bool unaligned;
void *addrs;
+ /* Mutual exclusion of the completion ring in SKB mode. Two cases to protect:
+ * the NAPI TX thread and sendmsg error paths in the SKB destructor callback, and
+ * sockets sharing a single cq because they use the same netdev and queue id.
+ */
+ spinlock_t cq_lock;
struct xdp_buff_xsk *free_heads[];
};
diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h
deleted file mode 100644
index 8c18dc6d3fde..000000000000
--- a/include/soc/nps/common.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef SOC_NPS_COMMON_H
-#define SOC_NPS_COMMON_H
-
-#ifdef CONFIG_SMP
-#define NPS_IPI_IRQ 5
-#endif
-
-#define NPS_HOST_REG_BASE 0xF6000000
-
-#define NPS_MSU_BLKID 0x018
-
-#define CTOP_INST_RSPI_GIC_0_R12 0x3C56117E
-#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60
-#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422
-
-#ifndef AUX_IENABLE
-#define AUX_IENABLE 0x40c
-#endif
-
-#define CTOP_AUX_IACK (0xFFFFF800 + 0x088)
-
-#ifndef __ASSEMBLY__
-
-/* In order to increase compilation test coverage */
-#ifdef CONFIG_ARC
-static inline void nps_ack_gic(void)
-{
- __asm__ __volatile__ (
- " .word %0\n"
- :
- : "i"(CTOP_INST_RSPI_GIC_0_R12)
- : "memory");
-}
-#else
-static inline void nps_ack_gic(void) { }
-#define write_aux_reg(r, v)
-#define read_aux_reg(r) 0
-#endif
-
-/* CPU global ID */
-struct global_id {
- union {
- struct {
-#ifdef CONFIG_EZNPS_MTM_EXT
- u32 __reserved:20, cluster:4, core:4, thread:4;
-#else
- u32 __reserved:24, cluster:4, core:4;
-#endif
- };
- u32 value;
- };
-};
-
-/*
- * Convert logical to physical CPU IDs
- *
- * The conversion swap bits 1 and 2 of cluster id (out of 4 bits)
- * Now quad of logical clusters id's are adjacent physically,
- * and not like the id's physically came with each cluster.
- * Below table is 4x4 mesh of core clusters as it layout on chip.
- * Cluster ids are in format: logical (physical)
- *
- * ----------------- ------------------
- * 3 | 5 (3) 7 (7) | | 13 (11) 15 (15)|
- *
- * 2 | 4 (2) 6 (6) | | 12 (10) 14 (14)|
- * ----------------- ------------------
- * 1 | 1 (1) 3 (5) | | 9 (9) 11 (13)|
- *
- * 0 | 0 (0) 2 (4) | | 8 (8) 10 (12)|
- * ----------------- ------------------
- * 0 1 2 3
- */
-static inline int nps_cluster_logic_to_phys(int cluster)
-{
-#ifdef __arc__
- __asm__ __volatile__(
- " mov r3,%0\n"
- " .short %1\n"
- " .word %2\n"
- " mov %0,r3\n"
- : "+r"(cluster)
- : "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST),
- "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM)
- : "r3");
-#endif
-
- return cluster;
-}
-
-#define NPS_CPU_TO_CLUSTER_NUM(cpu) \
- ({ struct global_id gid; gid.value = cpu; \
- nps_cluster_logic_to_phys(gid.cluster); })
-
-struct nps_host_reg_address {
- union {
- struct {
- u32 base:8, cl_x:4, cl_y:4,
- blkid:6, reg:8, __reserved:2;
- };
- u32 value;
- };
-};
-
-struct nps_host_reg_address_non_cl {
- union {
- struct {
- u32 base:7, blkid:11, reg:12, __reserved:2;
- };
- u32 value;
- };
-};
-
-static inline void *nps_host_reg_non_cl(u32 blkid, u32 reg)
-{
- struct nps_host_reg_address_non_cl reg_address;
-
- reg_address.value = NPS_HOST_REG_BASE;
- reg_address.blkid = blkid;
- reg_address.reg = reg;
-
- return (void *)reg_address.value;
-}
-
-static inline void *nps_host_reg(u32 cpu, u32 blkid, u32 reg)
-{
- struct nps_host_reg_address reg_address;
- u32 cl = NPS_CPU_TO_CLUSTER_NUM(cpu);
-
- reg_address.value = NPS_HOST_REG_BASE;
- reg_address.cl_x = (cl >> 2) & 0x3;
- reg_address.cl_y = cl & 0x3;
- reg_address.blkid = blkid;
- reg_address.reg = reg;
-
- return (void *)reg_address.value;
-}
-#endif /* __ASSEMBLY__ */
-
-#endif /* SOC_NPS_COMMON_H */
diff --git a/include/soc/nps/mtm.h b/include/soc/nps/mtm.h
deleted file mode 100644
index d2f5e7e3703e..000000000000
--- a/include/soc/nps/mtm.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef SOC_NPS_MTM_H
-#define SOC_NPS_MTM_H
-
-#define CTOP_INST_HWSCHD_OFF_R3 0x3B6F00BF
-#define CTOP_INST_HWSCHD_RESTORE_R3 0x3E6F70C3
-
-static inline void hw_schd_save(unsigned int *flags)
-{
- __asm__ __volatile__(
- " .word %1\n"
- " st r3,[%0]\n"
- :
- : "r"(flags), "i"(CTOP_INST_HWSCHD_OFF_R3)
- : "r3", "memory");
-}
-
-static inline void hw_schd_restore(unsigned int flags)
-{
- __asm__ __volatile__(
- " mov r3, %0\n"
- " .word %1\n"
- :
- : "r"(flags), "i"(CTOP_INST_HWSCHD_RESTORE_R3)
- : "r3");
-}
-
-#endif /* SOC_NPS_MTM_H */
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 4eef374d4413..4a5cc8c64be3 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -231,6 +231,7 @@ enum afs_file_error {
afs_file_error_dir_bad_magic,
afs_file_error_dir_big,
afs_file_error_dir_missing_page,
+ afs_file_error_dir_name_too_long,
afs_file_error_dir_over_end,
afs_file_error_dir_small,
afs_file_error_dir_unmarked_ext,
@@ -488,6 +489,7 @@ enum afs_cb_break_reason {
EM(afs_file_error_dir_bad_magic, "DIR_BAD_MAGIC") \
EM(afs_file_error_dir_big, "DIR_BIG") \
EM(afs_file_error_dir_missing_page, "DIR_MISSING_PAGE") \
+ EM(afs_file_error_dir_name_too_long, "DIR_NAME_TOO_LONG") \
EM(afs_file_error_dir_over_end, "DIR_ENT_OVER_END") \
EM(afs_file_error_dir_small, "DIR_SMALL") \
EM(afs_file_error_dir_unmarked_ext, "DIR_UNMARKED_EXT") \
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 5039af667645..cbe3e152d24c 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -366,7 +366,7 @@ TRACE_EVENT(sched_process_wait,
);
/*
- * Tracepoint for do_fork:
+ * Tracepoint for kernel_clone:
*/
TRACE_EVENT(sched_process_fork,
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 58994e013022..6f89c27265f5 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -1424,13 +1424,61 @@ TRACE_EVENT(rpcb_unregister,
)
);
+/* Record an xdr_buf containing a fully-formed RPC message */
+DECLARE_EVENT_CLASS(svc_xdr_msg_class,
+ TP_PROTO(
+ const struct xdr_buf *xdr
+ ),
+
+ TP_ARGS(xdr),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(const void *, head_base)
+ __field(size_t, head_len)
+ __field(const void *, tail_base)
+ __field(size_t, tail_len)
+ __field(unsigned int, page_len)
+ __field(unsigned int, msg_len)
+ ),
+
+ TP_fast_assign(
+ __be32 *p = (__be32 *)xdr->head[0].iov_base;
+
+ __entry->xid = be32_to_cpu(*p);
+ __entry->head_base = p;
+ __entry->head_len = xdr->head[0].iov_len;
+ __entry->tail_base = xdr->tail[0].iov_base;
+ __entry->tail_len = xdr->tail[0].iov_len;
+ __entry->page_len = xdr->page_len;
+ __entry->msg_len = xdr->len;
+ ),
+
+ TP_printk("xid=0x%08x head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+ __entry->xid,
+ __entry->head_base, __entry->head_len, __entry->page_len,
+ __entry->tail_base, __entry->tail_len, __entry->msg_len
+ )
+);
+
+#define DEFINE_SVCXDRMSG_EVENT(name) \
+ DEFINE_EVENT(svc_xdr_msg_class, \
+ svc_xdr_##name, \
+ TP_PROTO( \
+ const struct xdr_buf *xdr \
+ ), \
+ TP_ARGS(xdr))
+
+DEFINE_SVCXDRMSG_EVENT(recvfrom);
+
+/* Record an xdr_buf containing arbitrary data, tagged with an XID */
DECLARE_EVENT_CLASS(svc_xdr_buf_class,
TP_PROTO(
- const struct svc_rqst *rqst,
+ __be32 xid,
const struct xdr_buf *xdr
),
- TP_ARGS(rqst, xdr),
+ TP_ARGS(xid, xdr),
TP_STRUCT__entry(
__field(u32, xid)
@@ -1443,7 +1491,7 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class,
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->xid = be32_to_cpu(xid);
__entry->head_base = xdr->head[0].iov_base;
__entry->head_len = xdr->head[0].iov_len;
__entry->tail_base = xdr->tail[0].iov_base;
@@ -1463,12 +1511,11 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class,
DEFINE_EVENT(svc_xdr_buf_class, \
svc_xdr_##name, \
TP_PROTO( \
- const struct svc_rqst *rqst, \
+ __be32 xid, \
const struct xdr_buf *xdr \
), \
- TP_ARGS(rqst, xdr))
+ TP_ARGS(xid, xdr))
-DEFINE_SVCXDRBUF_EVENT(recvfrom);
DEFINE_SVCXDRBUF_EVENT(sendto);
/*
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 808b48a93330..0827037c5484 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -1,11 +1,10 @@
-/**
- * \file drm.h
+/*
* Header for the Direct Rendering Manager
*
- * \author Rickard E. (Rik) Faith <[email protected]>
+ * Author: Rickard E. (Rik) Faith <[email protected]>
*
- * \par Acknowledgments:
- * Dec 1999, Richard Henderson <[email protected]>, move to generic \c cmpxchg.
+ * Acknowledgments:
+ * Dec 1999, Richard Henderson <[email protected]>, move to generic cmpxchg.
*/
/*
@@ -85,7 +84,7 @@ typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
-/**
+/*
* Cliprect.
*
* \warning: If you change this structure, make sure you change
@@ -101,7 +100,7 @@ struct drm_clip_rect {
unsigned short y2;
};
-/**
+/*
* Drawable information.
*/
struct drm_drawable_info {
@@ -109,7 +108,7 @@ struct drm_drawable_info {
struct drm_clip_rect *rects;
};
-/**
+/*
* Texture region,
*/
struct drm_tex_region {
@@ -120,7 +119,7 @@ struct drm_tex_region {
unsigned int age;
};
-/**
+/*
* Hardware lock.
*
* The lock structure is a simple cache-line aligned integer. To avoid
@@ -132,7 +131,7 @@ struct drm_hw_lock {
char padding[60]; /**< Pad to cache line */
};
-/**
+/*
* DRM_IOCTL_VERSION ioctl argument type.
*
* \sa drmGetVersion().
@@ -149,7 +148,7 @@ struct drm_version {
char __user *desc; /**< User-space buffer to hold desc */
};
-/**
+/*
* DRM_IOCTL_GET_UNIQUE ioctl argument type.
*
* \sa drmGetBusid() and drmSetBusId().
@@ -168,7 +167,7 @@ struct drm_block {
int unused;
};
-/**
+/*
* DRM_IOCTL_CONTROL ioctl argument type.
*
* \sa drmCtlInstHandler() and drmCtlUninstHandler().
@@ -183,7 +182,7 @@ struct drm_control {
int irq;
};
-/**
+/*
* Type of memory to map.
*/
enum drm_map_type {
@@ -195,7 +194,7 @@ enum drm_map_type {
_DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
};
-/**
+/*
* Memory mapping flags.
*/
enum drm_map_flags {
@@ -214,7 +213,7 @@ struct drm_ctx_priv_map {
void *handle; /**< Handle of map */
};
-/**
+/*
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
* argument type.
*
@@ -231,7 +230,7 @@ struct drm_map {
/* Private data */
};
-/**
+/*
* DRM_IOCTL_GET_CLIENT ioctl argument type.
*/
struct drm_client {
@@ -263,7 +262,7 @@ enum drm_stat_type {
/* Add to the *END* of the list */
};
-/**
+/*
* DRM_IOCTL_GET_STATS ioctl argument type.
*/
struct drm_stats {
@@ -274,7 +273,7 @@ struct drm_stats {
} data[15];
};
-/**
+/*
* Hardware locking flags.
*/
enum drm_lock_flags {
@@ -289,7 +288,7 @@ enum drm_lock_flags {
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
};
-/**
+/*
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
*
* \sa drmGetLock() and drmUnlock().
@@ -299,7 +298,7 @@ struct drm_lock {
enum drm_lock_flags flags;
};
-/**
+/*
* DMA flags
*
* \warning
@@ -328,7 +327,7 @@ enum drm_dma_flags {
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
};
-/**
+/*
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
*
* \sa drmAddBufs().
@@ -351,7 +350,7 @@ struct drm_buf_desc {
*/
};
-/**
+/*
* DRM_IOCTL_INFO_BUFS ioctl argument type.
*/
struct drm_buf_info {
@@ -359,7 +358,7 @@ struct drm_buf_info {
struct drm_buf_desc __user *list;
};
-/**
+/*
* DRM_IOCTL_FREE_BUFS ioctl argument type.
*/
struct drm_buf_free {
@@ -367,7 +366,7 @@ struct drm_buf_free {
int __user *list;
};
-/**
+/*
* Buffer information
*
* \sa drm_buf_map.
@@ -379,7 +378,7 @@ struct drm_buf_pub {
void __user *address; /**< Address of buffer */
};
-/**
+/*
* DRM_IOCTL_MAP_BUFS ioctl argument type.
*/
struct drm_buf_map {
@@ -392,7 +391,7 @@ struct drm_buf_map {
struct drm_buf_pub __user *list; /**< Buffer information */
};
-/**
+/*
* DRM_IOCTL_DMA ioctl argument type.
*
* Indices here refer to the offset into the buffer list in drm_buf_get.
@@ -417,7 +416,7 @@ enum drm_ctx_flags {
_DRM_CONTEXT_2DONLY = 0x02
};
-/**
+/*
* DRM_IOCTL_ADD_CTX ioctl argument type.
*
* \sa drmCreateContext() and drmDestroyContext().
@@ -427,7 +426,7 @@ struct drm_ctx {
enum drm_ctx_flags flags;
};
-/**
+/*
* DRM_IOCTL_RES_CTX ioctl argument type.
*/
struct drm_ctx_res {
@@ -435,14 +434,14 @@ struct drm_ctx_res {
struct drm_ctx __user *contexts;
};
-/**
+/*
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
*/
struct drm_draw {
drm_drawable_t handle;
};
-/**
+/*
* DRM_IOCTL_UPDATE_DRAW ioctl argument type.
*/
typedef enum {
@@ -456,14 +455,14 @@ struct drm_update_draw {
unsigned long long data;
};
-/**
+/*
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
*/
struct drm_auth {
drm_magic_t magic;
};
-/**
+/*
* DRM_IOCTL_IRQ_BUSID ioctl argument type.
*
* \sa drmGetInterruptFromBusID().
@@ -505,7 +504,7 @@ struct drm_wait_vblank_reply {
long tval_usec;
};
-/**
+/*
* DRM_IOCTL_WAIT_VBLANK ioctl argument type.
*
* \sa drmWaitVBlank().
@@ -518,7 +517,7 @@ union drm_wait_vblank {
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2
-/**
+/*
* DRM_IOCTL_MODESET_CTL ioctl argument type
*
* \sa drmModesetCtl().
@@ -528,7 +527,7 @@ struct drm_modeset_ctl {
__u32 cmd;
};
-/**
+/*
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
* \sa drmAgpEnable().
@@ -537,7 +536,7 @@ struct drm_agp_mode {
unsigned long mode; /**< AGP mode */
};
-/**
+/*
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
*
* \sa drmAgpAlloc() and drmAgpFree().
@@ -549,7 +548,7 @@ struct drm_agp_buffer {
unsigned long physical; /**< Physical used by i810 */
};
-/**
+/*
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
*
* \sa drmAgpBind() and drmAgpUnbind().
@@ -559,7 +558,7 @@ struct drm_agp_binding {
unsigned long offset; /**< In bytes -- will round to page boundary */
};
-/**
+/*
* DRM_IOCTL_AGP_INFO ioctl argument type.
*
* \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
@@ -580,7 +579,7 @@ struct drm_agp_info {
unsigned short id_device;
};
-/**
+/*
* DRM_IOCTL_SG_ALLOC ioctl argument type.
*/
struct drm_scatter_gather {
@@ -588,7 +587,7 @@ struct drm_scatter_gather {
unsigned long handle; /**< Used for mapping / unmapping */
};
-/**
+/*
* DRM_IOCTL_SET_VERSION ioctl argument type.
*/
struct drm_set_version {
@@ -598,14 +597,14 @@ struct drm_set_version {
int drm_dd_minor;
};
-/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
/** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
-/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+/* DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
/** Handle for the object being named */
__u32 handle;
@@ -614,7 +613,7 @@ struct drm_gem_flink {
__u32 name;
};
-/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+/* DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
__u32 name;
@@ -652,7 +651,7 @@ struct drm_gem_open {
#define DRM_CAP_SYNCOBJ 0x13
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
-/** DRM_IOCTL_GET_CAP ioctl argument type */
+/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
__u64 capability;
__u64 value;
@@ -678,7 +677,9 @@ struct drm_get_cap {
/**
* DRM_CLIENT_CAP_ATOMIC
*
- * If set to 1, the DRM core will expose atomic properties to userspace
+ * If set to 1, the DRM core will expose atomic properties to userspace. This
+ * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
+ * &DRM_CLIENT_CAP_ASPECT_RATIO.
*/
#define DRM_CLIENT_CAP_ATOMIC 3
@@ -698,7 +699,7 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
-/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
__u64 value;
@@ -950,7 +951,7 @@ extern "C" {
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
-/**
+/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
* Generic IOCTLS restart at 0xA0.
@@ -961,7 +962,7 @@ extern "C" {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
-/**
+/*
* Header for events written back to userspace on the drm fd. The
* type defines the type of event, the length specifies the total
* length of the event (including the header), and user_data is
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 723c8e23ca87..f76de49c768f 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -528,6 +528,25 @@ extern "C" {
#define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7)
/*
+ * Intel Color Control Surface with Clear Color (CCS) for Gen-12 render
+ * compression.
+ *
+ * The main surface is Y-tiled and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be ignored. The clear color structure is 256 bits. The first 128 bits
+ * represent the Raw Clear Color Red, Green, Blue and Alpha components, each represented
+ * by 32 bits. The raw clear color is consumed by the 3d engine and generates
+ * the converted clear color of size 64 bits. The first 32 bits store the Lower
+ * Converted Clear Color value and the next 32 bits store the Higher Converted
+ * Clear Color value when applicable. The Converted Clear Color values are
+ * consumed by the DE. The last 64 bits are used to store Color Discard Enable
+ * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8)
+
+/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid in a Z-shape, and each pixel data is following the
@@ -1036,9 +1055,9 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* Not all combinations are valid, and different SoCs may support different
* combinations of layout and options.
*/
-#define __fourcc_mod_amlogic_layout_mask 0xf
+#define __fourcc_mod_amlogic_layout_mask 0xff
#define __fourcc_mod_amlogic_options_shift 8
-#define __fourcc_mod_amlogic_options_mask 0xf
+#define __fourcc_mod_amlogic_options_mask 0xff
#define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
fourcc_mod_code(AMLOGIC, \
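The Gen-12 clear-color comment above packs a lot of layout into prose. A hypothetical C view of the 256-bit block stored at plane index 2 (purely illustrative, this struct is not part of the UAPI; the exact packing of the final 64 bits is an assumption):

#include <linux/types.h>

struct gen12_ccs_clear_color {		/* hypothetical, mirrors the comment above */
	__u32 raw_rgba[4];		/* raw clear color R, G, B, A; consumed by the 3D engine */
	__u32 converted_lo;		/* lower converted clear color, consumed by the DE */
	__u32 converted_hi;		/* higher converted clear color, when applicable */
	__u32 discard_and_depth[2];	/* color discard enable + depth clear value valid, ignored by the DE */
};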
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index b49fbf2bdc40..1c064627e6c3 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -414,15 +414,12 @@ enum drm_mode_subconnector {
*
* If the @count_modes field is set to zero, the kernel will perform a forced
* probe on the connector to refresh the connector status, modes and EDID.
- * A forced-probe can be slow and the ioctl will block. A force-probe can cause
- * flickering and temporary freezes, so it should not be performed
- * automatically.
+ * A forced-probe can be slow, might cause flickering and the ioctl will block.
*
- * User-space shouldn't need to force-probe connectors in general: the kernel
- * will automatically take care of probing connectors that don't support
- * hot-plug detection when appropriate. However, user-space may force-probe
- * connectors on user request (e.g. clicking a "Scan connectors" button, or
- * opening a UI to manage screens).
+ * User-space needs to force-probe connectors to ensure their metadata is
+ * up-to-date at startup and after receiving a hot-plug event. User-space
+ * may perform a forced-probe when the user explicitly requests it. User-space
+ * shouldn't perform a forced-probe in other situations.
*/
struct drm_mode_get_connector {
/** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */
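Per the rewritten comment, user-space is expected to force-probe at startup, on hot-plug, and on explicit user request. A hedged sketch of such a probe, issuing DRM_IOCTL_MODE_GETCONNECTOR with count_modes left at zero (a second call with allocated arrays would then fetch the refreshed modes; error handling omitted):

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int force_probe_connector(int drm_fd, __u32 connector_id)
{
	struct drm_mode_get_connector conn;

	memset(&conn, 0, sizeof(conn));
	conn.connector_id = connector_id;
	/* count_modes == 0 asks the kernel for a forced probe; this may block
	 * and can cause flicker, so only do it in the situations listed above. */
	return ioctl(drm_fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);
}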
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index fa1f3d62f9a6..1987e2ea79a3 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -177,8 +177,9 @@ enum drm_i915_pmu_engine_sample {
#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
+#define I915_PMU_SOFTWARE_GT_AWAKE_TIME __I915_PMU_OTHER(4)
-#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
+#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
index 52e8bcb33981..cf7399f03b71 100644
--- a/include/uapi/linux/bcache.h
+++ b/include/uapi/linux/bcache.h
@@ -213,7 +213,7 @@ struct cache_sb_disk {
__le16 keys;
};
__le64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
- __le16 bucket_size_hi;
+ __le16 obso_bucket_size_hi; /* obsoleted */
};
/*
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 874cc12a34d9..82708c6db432 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -75,8 +75,9 @@ struct rtnl_link_stats {
*
* @rx_dropped: Number of packets received but not processed,
* e.g. due to lack of resources or unsupported protocol.
- * For hardware interfaces this counter should not include packets
- * dropped by the device which are counted separately in
+ * For hardware interfaces this counter may include packets discarded
+ * due to L2 address filtering but should not include packets dropped
+ * by the device due to buffer exhaustion which are counted separately in
* @rx_missed_errors (since procfs folds those two counters together).
*
* @tx_dropped: Number of packets dropped on their way to transmission,
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 886802b8ffba..374c67875cdb 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -251,6 +251,7 @@ struct kvm_hyperv_exit {
#define KVM_EXIT_X86_RDMSR 29
#define KVM_EXIT_X86_WRMSR 30
#define KVM_EXIT_DIRTY_RING_FULL 31
+#define KVM_EXIT_AP_RESET_HOLD 32
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -573,6 +574,7 @@ struct kvm_vapic_addr {
#define KVM_MP_STATE_CHECK_STOP 6
#define KVM_MP_STATE_OPERATING 7
#define KVM_MP_STATE_LOAD 8
+#define KVM_MP_STATE_AP_RESET_HOLD 9
struct kvm_mp_state {
__u32 mp_state;
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 28b6ee53305f..b1633e7ba529 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -293,6 +293,7 @@ enum nft_rule_compat_attributes {
* @NFT_SET_EVAL: set can be updated from the evaluation path
* @NFT_SET_OBJECT: set contains stateful objects
* @NFT_SET_CONCAT: set contains a concatenation
+ * @NFT_SET_EXPR: set contains expressions
*/
enum nft_set_flags {
NFT_SET_ANONYMOUS = 0x1,
@@ -303,6 +304,7 @@ enum nft_set_flags {
NFT_SET_EVAL = 0x20,
NFT_SET_OBJECT = 0x40,
NFT_SET_CONCAT = 0x80,
+ NFT_SET_EXPR = 0x100,
};
/**
@@ -706,6 +708,7 @@ enum nft_dynset_ops {
enum nft_dynset_flags {
NFT_DYNSET_F_INV = (1 << 0),
+ NFT_DYNSET_F_EXPR = (1 << 1),
};
/**
diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h
index 8dbecb3ad036..1cc5ce0ae062 100644
--- a/include/uapi/linux/ppp-ioctl.h
+++ b/include/uapi/linux/ppp-ioctl.h
@@ -116,7 +116,7 @@ struct pppol2tp_ioc_stats {
#define PPPIOCGCHAN _IOR('t', 55, int) /* get ppp channel number */
#define PPPIOCGL2TPSTATS _IOR('t', 54, struct pppol2tp_ioc_stats)
#define PPPIOCBRIDGECHAN _IOW('t', 53, int) /* bridge one channel to another */
-#define PPPIOCUNBRIDGECHAN _IO('t', 54) /* unbridge channel */
+#define PPPIOCUNBRIDGECHAN _IO('t', 52) /* unbridge channel */
#define SIOCGPPPSTATS (SIOCDEVPRIVATE + 0)
#define SIOCGPPPVER (SIOCDEVPRIVATE + 1) /* NEVER change this!! */
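The one-byte change above resolves a command-number collision: _IO('t', 54) was already taken by PPPIOCGL2TPSTATS two lines up, so the unbridge ioctl moves to nr 52. A small standalone check against installed headers that already carry the fix (illustrative only):

#include <stdio.h>
#include <linux/ioctl.h>
#include <linux/ppp-ioctl.h>

int main(void)
{
	/* After the fix, the two ioctls no longer share nr 54 in the 't' space. */
	printf("PPPIOCUNBRIDGECHAN nr=%u\n", (unsigned int)_IOC_NR(PPPIOCUNBRIDGECHAN));
	printf("PPPIOCGL2TPSTATS   nr=%u\n", (unsigned int)_IOC_NR(PPPIOCGL2TPSTATS));
	return 0;
}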
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 8c15a7d336a0..dba3827c43ca 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -279,6 +279,7 @@ enum hl_device_status {
* HL_INFO_CLK_THROTTLE_REASON - Retrieve clock throttling reason
* HL_INFO_SYNC_MANAGER - Retrieve sync manager info per dcore
* HL_INFO_TOTAL_ENERGY - Retrieve total energy consumption
+ * HL_INFO_PLL_FREQUENCY - Retrieve PLL frequency
*/
#define HL_INFO_HW_IP_INFO 0
#define HL_INFO_HW_EVENTS 1
@@ -425,6 +426,8 @@ struct hl_info_sync_manager {
* @ctx_device_in_reset_drop_cnt: context dropped due to device in reset
* @total_max_cs_in_flight_drop_cnt: total dropped due to maximum CS in-flight
* @ctx_max_cs_in_flight_drop_cnt: context dropped due to maximum CS in-flight
+ * @total_validation_drop_cnt: total dropped due to validation error
+ * @ctx_validation_drop_cnt: context dropped due to validation error
*/
struct hl_info_cs_counters {
__u64 total_out_of_mem_drop_cnt;
@@ -437,6 +440,8 @@ struct hl_info_cs_counters {
__u64 ctx_device_in_reset_drop_cnt;
__u64 total_max_cs_in_flight_drop_cnt;
__u64 ctx_max_cs_in_flight_drop_cnt;
+ __u64 total_validation_drop_cnt;
+ __u64 ctx_validation_drop_cnt;
};
enum gaudi_dcores {
diff --git a/include/video/sstfb.h b/include/video/sstfb.h
index 28384f354773..d4a5e41d1173 100644
--- a/include/video/sstfb.h
+++ b/include/video/sstfb.h
@@ -23,7 +23,7 @@
# define SST_DEBUG_FUNC 1
# define SST_DEBUG_VAR 1
#else
-# define dprintk(X...)
+# define dprintk(X...) no_printk(X)
# define SST_DEBUG_REG 0
# define SST_DEBUG_FUNC 0
# define SST_DEBUG_VAR 0
@@ -48,7 +48,7 @@
#if (SST_DEBUG_FUNC > 1)
# define f_ddprintk(X...) dprintk(" " X)
#else
-# define f_ddprintk(X...)
+# define f_ddprintk(X...) no_printk(X)
#endif
#if (SST_DEBUG_FUNC > 2)
# define f_dddprintk(X...) dprintk(" " X)
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 00c7235ae93e..2c43b0ef1e4d 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -192,7 +192,7 @@ void xs_suspend_cancel(void);
struct work_struct;
-void xenbus_probe(struct work_struct *);
+void xenbus_probe(void);
#define XENBUS_IS_ERR_READ(str) ({ \
if (!IS_ERR(str) && strlen(str) == 0) { \
diff --git a/init/main.c b/init/main.c
index 6feee7f11eaf..c68d784376ca 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1480,14 +1480,8 @@ void __init console_on_rootfs(void)
struct file *file = filp_open("/dev/console", O_RDWR, 0);
if (IS_ERR(file)) {
- pr_err("Warning: unable to open an initial console. Fallback to ttynull.\n");
- register_ttynull_console();
-
- file = filp_open("/dev/console", O_RDWR, 0);
- if (IS_ERR(file)) {
- pr_err("Warning: Failed to add ttynull console. No stdin, stdout, and stderr for the init process!\n");
- return;
- }
+ pr_err("Warning: unable to open an initial console.\n");
+ return;
}
init_dup(file);
init_dup(file);
@@ -1518,6 +1512,7 @@ static noinline void __init kernel_init_freeable(void)
init_mm_internals();
+ rcu_init_tasks_generic();
do_pre_smp_initcalls();
lockup_detector_init();
diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
index 6edff97ad594..2f0597320b6d 100644
--- a/kernel/bpf/bpf_inode_storage.c
+++ b/kernel/bpf/bpf_inode_storage.c
@@ -176,14 +176,14 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
* bpf_local_storage_update expects the owner to have a
* valid storage pointer.
*/
- if (!inode_storage_ptr(inode))
+ if (!inode || !inode_storage_ptr(inode))
return (unsigned long)NULL;
sdata = inode_storage_lookup(inode, map, true);
if (sdata)
return (unsigned long)sdata->data;
- /* This helper must only called from where the inode is gurranteed
+ /* This helper must only called from where the inode is guaranteed
* to have a refcount and cannot be freed.
*/
if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) {
@@ -200,7 +200,10 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
BPF_CALL_2(bpf_inode_storage_delete,
struct bpf_map *, map, struct inode *, inode)
{
- /* This helper must only called from where the inode is gurranteed
+ if (!inode)
+ return -EINVAL;
+
+ /* This helper must only called from where the inode is guaranteed
* to have a refcount and cannot be freed.
*/
return inode_storage_delete(inode, map);
diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
index 4ef1959a78f2..e0da0258b732 100644
--- a/kernel/bpf/bpf_task_storage.c
+++ b/kernel/bpf/bpf_task_storage.c
@@ -218,7 +218,7 @@ BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
* bpf_local_storage_update expects the owner to have a
* valid storage pointer.
*/
- if (!task_storage_ptr(task))
+ if (!task || !task_storage_ptr(task))
return (unsigned long)NULL;
sdata = task_storage_lookup(task, map, true);
@@ -243,6 +243,9 @@ BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
task)
{
+ if (!task)
+ return -EINVAL;
+
/* This helper must only be called from places where the lifetime of the task
* is guaranteed. Either by being refcounted or by being protected
* by an RCU read-side critical section.
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 8d6bdb4f4d61..84a36ee4a4c2 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -4172,7 +4172,7 @@ static int btf_parse_hdr(struct btf_verifier_env *env)
return -ENOTSUPP;
}
- if (btf_data_size == hdr->hdr_len) {
+ if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
btf_verifier_log(env, "No data");
return -EINVAL;
}
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 6ec088a96302..96555a8a2c54 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1391,12 +1391,13 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
if (ctx.optlen != 0) {
*optlen = ctx.optlen;
*kernel_optval = ctx.optval;
+ /* export and don't free sockopt buf */
+ return 0;
}
}
out:
- if (ret)
- sockopt_free_buf(&ctx);
+ sockopt_free_buf(&ctx);
return ret;
}
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 7e848200cd26..c1ac7f964bc9 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -152,6 +152,7 @@ static void htab_init_buckets(struct bpf_htab *htab)
lockdep_set_class(&htab->buckets[i].lock,
&htab->lockdep_key);
}
+ cond_resched();
}
}
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index bd8a3183d030..41ca280b1dc1 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -108,7 +108,7 @@ BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
}
const struct bpf_func_proto bpf_map_peek_elem_proto = {
- .func = bpf_map_pop_elem,
+ .func = bpf_map_peek_elem,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4caf06fe4152..e5999d86c76e 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -17,7 +17,6 @@
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
@@ -2713,7 +2712,6 @@ out_unlock:
out_put_prog:
if (tgt_prog_fd && tgt_prog)
bpf_prog_put(tgt_prog);
- bpf_prog_put(prog);
return err;
}
@@ -2826,7 +2824,10 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
tp_name = prog->aux->attach_func_name;
break;
}
- return bpf_tracing_prog_attach(prog, 0, 0);
+ err = bpf_tracing_prog_attach(prog, 0, 0);
+ if (err >= 0)
+ return err;
+ goto out_put_prog;
case BPF_PROG_TYPE_RAW_TRACEPOINT:
case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
if (strncpy_from_user(buf,
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index e73c07593024..175b7b42bfc4 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -37,7 +37,7 @@ retry:
if (!task) {
++*tid;
goto retry;
- } else if (skip_if_dup_files && task->tgid != task->pid &&
+ } else if (skip_if_dup_files && !thread_group_leader(task) &&
task->files == task->group_leader->files) {
put_task_struct(task);
task = NULL;
@@ -151,13 +151,14 @@ again:
curr_task = info->task;
curr_fd = info->fd;
} else {
- curr_task = task_seq_get_next(ns, &curr_tid, true);
- if (!curr_task) {
- info->task = NULL;
- return NULL;
- }
-
- /* set info->task and info->tid */
+ curr_task = task_seq_get_next(ns, &curr_tid, true);
+ if (!curr_task) {
+ info->task = NULL;
+ info->tid = curr_tid;
+ return NULL;
+ }
+
+ /* set info->task and info->tid */
info->task = curr_task;
if (curr_tid == info->tid) {
curr_fd = info->fd;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 17270b8404f1..e7368c5eacb7 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2217,6 +2217,8 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
case PTR_TO_RDWR_BUF:
case PTR_TO_RDWR_BUF_OR_NULL:
case PTR_TO_PERCPU_BTF_ID:
+ case PTR_TO_MEM:
+ case PTR_TO_MEM_OR_NULL:
return true;
default:
return false;
@@ -5311,7 +5313,7 @@ static bool signed_add_overflows(s64 a, s64 b)
return res < a;
}
-static bool signed_add32_overflows(s64 a, s64 b)
+static bool signed_add32_overflows(s32 a, s32 b)
{
/* Do the add in u32, where overflow is well-defined */
s32 res = (s32)((u32)a + (u32)b);
@@ -5321,7 +5323,7 @@ static bool signed_add32_overflows(s64 a, s64 b)
return res < a;
}
-static bool signed_sub_overflows(s32 a, s32 b)
+static bool signed_sub_overflows(s64 a, s64 b)
{
/* Do the sub in u64, where overflow is well-defined */
s64 res = (s64)((u64)a - (u64)b);
@@ -5333,7 +5335,7 @@ static bool signed_sub_overflows(s32 a, s32 b)
static bool signed_sub32_overflows(s32 a, s32 b)
{
- /* Do the sub in u64, where overflow is well-defined */
+ /* Do the sub in u32, where overflow is well-defined */
s32 res = (s32)((u32)a - (u32)b);
if (b < 0)
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 191c329e482a..32596fdbcd5b 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -908,6 +908,8 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
if (opt == -ENOPARAM) {
if (strcmp(param->key, "source") == 0) {
+ if (fc->source)
+ return invalf(fc, "Multiple sources not supported");
fc->source = param->string;
param->string = NULL;
return 0;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index fefa21981027..613845769103 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -244,7 +244,7 @@ bool cgroup_ssid_enabled(int ssid)
*
* The default hierarchy is the v2 interface of cgroup and this function
* can be used to test whether a cgroup is on the default hierarchy for
- * cases where a subsystem should behave differnetly depending on the
+ * cases where a subsystem should behave differently depending on the
* interface version.
*
* List of changed behaviors:
@@ -262,7 +262,7 @@ bool cgroup_ssid_enabled(int ssid)
* "cgroup.procs" instead.
*
* - "cgroup.procs" is not sorted. pids will be unique unless they got
- * recycled inbetween reads.
+ * recycled in-between reads.
*
* - "release_agent" and "notify_on_release" are removed. Replacement
* notification mechanism will be implemented.
@@ -342,7 +342,7 @@ static bool cgroup_is_mixable(struct cgroup *cgrp)
return !cgroup_parent(cgrp);
}
-/* can @cgrp become a thread root? should always be true for a thread root */
+/* can @cgrp become a thread root? Should always be true for a thread root */
static bool cgroup_can_be_thread_root(struct cgroup *cgrp)
{
/* mixables don't care */
@@ -527,7 +527,7 @@ static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
* the root css is returned, so this function always returns a valid css.
*
* The returned css is not guaranteed to be online, and therefore it is the
- * callers responsiblity to tryget a reference for it.
+ * callers responsibility to try get a reference for it.
*/
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
struct cgroup_subsys *ss)
@@ -699,7 +699,7 @@ EXPORT_SYMBOL_GPL(of_css);
; \
else
-/* walk live descendants in preorder */
+/* walk live descendants in pre order */
#define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) \
css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL)) \
if (({ lockdep_assert_held(&cgroup_mutex); \
@@ -933,7 +933,7 @@ void put_css_set_locked(struct css_set *cset)
WARN_ON_ONCE(!list_empty(&cset->threaded_csets));
- /* This css_set is dead. unlink it and release cgroup and css refs */
+ /* This css_set is dead. Unlink it and release cgroup and css refs */
for_each_subsys(ss, ssid) {
list_del(&cset->e_cset_node[ssid]);
css_put(cset->subsys[ssid]);
@@ -1058,7 +1058,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset,
/*
* Build the set of subsystem state objects that we want to see in the
- * new css_set. while subsystems can change globally, the entries here
+ * new css_set. While subsystems can change globally, the entries here
* won't change, so no need for locking.
*/
for_each_subsys(ss, i) {
@@ -1148,7 +1148,7 @@ static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
/*
* Always add links to the tail of the lists so that the lists are
- * in choronological order.
+ * in chronological order.
*/
list_move_tail(&link->cset_link, &cgrp->cset_links);
list_add_tail(&link->cgrp_link, &cset->cgrp_links);
@@ -3654,7 +3654,7 @@ static ssize_t cgroup_freeze_write(struct kernfs_open_file *of,
static int cgroup_file_open(struct kernfs_open_file *of)
{
- struct cftype *cft = of->kn->priv;
+ struct cftype *cft = of_cft(of);
if (cft->open)
return cft->open(of);
@@ -3663,7 +3663,7 @@ static int cgroup_file_open(struct kernfs_open_file *of)
static void cgroup_file_release(struct kernfs_open_file *of)
{
- struct cftype *cft = of->kn->priv;
+ struct cftype *cft = of_cft(of);
if (cft->release)
cft->release(of);
@@ -3674,7 +3674,7 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
{
struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
struct cgroup *cgrp = of->kn->parent->priv;
- struct cftype *cft = of->kn->priv;
+ struct cftype *cft = of_cft(of);
struct cgroup_subsys_state *css;
int ret;
@@ -3724,7 +3724,7 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt)
{
- struct cftype *cft = of->kn->priv;
+ struct cftype *cft = of_cft(of);
if (cft->poll)
return cft->poll(of, pt);
@@ -4134,7 +4134,7 @@ struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
* implies that if we observe !CSS_RELEASED on @pos in this RCU
* critical section, the one pointed to by its next pointer is
* guaranteed to not have finished its RCU grace period even if we
- * have dropped rcu_read_lock() inbetween iterations.
+ * have dropped rcu_read_lock() in-between iterations.
*
* If @pos has CSS_RELEASED set, its next pointer can't be
* dereferenced; however, as each css is given a monotonically
@@ -4382,7 +4382,7 @@ static struct css_set *css_task_iter_next_css_set(struct css_task_iter *it)
}
/**
- * css_task_iter_advance_css_set - advance a task itererator to the next css_set
+ * css_task_iter_advance_css_set - advance a task iterator to the next css_set
* @it: the iterator to advance
*
* Advance @it to the next css_set to walk.
@@ -6308,7 +6308,7 @@ struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
*
* Find the cgroup at @path on the default hierarchy, increment its
* reference count and return it. Returns pointer to the found cgroup on
- * success, ERR_PTR(-ENOENT) if @path doens't exist and ERR_PTR(-ENOTDIR)
+ * success, ERR_PTR(-ENOENT) if @path doesn't exist and ERR_PTR(-ENOTDIR)
* if @path points to a non-directory.
*/
struct cgroup *cgroup_get_from_path(const char *path)
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config
index 53d688bdd894..eb0029c9a6a6 100644
--- a/kernel/configs/android-recommended.config
+++ b/kernel/configs/android-recommended.config
@@ -81,7 +81,6 @@ CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_TABLET=y
CONFIG_INPUT_UINPUT=y
-CONFIG_ION=y
CONFIG_JOYSTICK_XPAD=y
CONFIG_JOYSTICK_XPAD_FF=y
CONFIG_JOYSTICK_XPAD_LEDS=y
diff --git a/kernel/exit.c b/kernel/exit.c
index 3594291a8542..04029e35e69a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -63,6 +63,7 @@
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>
+#include <linux/io_uring.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
@@ -776,6 +777,7 @@ void __noreturn do_exit(long code)
schedule();
}
+ io_uring_files_cancel(tsk->files);
exit_signals(tsk); /* sets PF_EXITING */
/* sync mm's RSS info before statistics gathering */
diff --git a/kernel/fork.c b/kernel/fork.c
index 37720a6d04ea..d66cd1014211 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -819,9 +819,8 @@ void __init fork_init(void)
init_task.signal->rlim[RLIMIT_SIGPENDING] =
init_task.signal->rlim[RLIMIT_NPROC];
- for (i = 0; i < UCOUNT_COUNTS; i++) {
+ for (i = 0; i < UCOUNT_COUNTS; i++)
init_user_ns.ucount_max[i] = max_threads/2;
- }
#ifdef CONFIG_VMAP_STACK
cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
@@ -1654,9 +1653,8 @@ static inline void init_task_pid_links(struct task_struct *task)
{
enum pid_type type;
- for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
+ for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
INIT_HLIST_NODE(&task->pid_links[type]);
- }
}
static inline void
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ab8567f32501..dec3f73e8db9 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -2859,3 +2859,4 @@ bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
rcu_read_unlock();
return res;
}
+EXPORT_SYMBOL_GPL(irq_check_status_bit);
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 2c0c4d6d0f83..dc0e2d7fbdfd 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -402,7 +402,7 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
struct msi_domain_ops *ops = info->ops;
struct irq_data *irq_data;
struct msi_desc *desc;
- msi_alloc_info_t arg;
+ msi_alloc_info_t arg = { };
int i, ret, virq;
bool can_reserve;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index a5eceecd4513..1578973c5740 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -294,7 +294,7 @@ static int kthread(void *_create)
do_exit(ret);
}
-/* called from do_fork() to get node information for about to be created task */
+/* called from kernel_clone() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
@@ -493,11 +493,36 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
return p;
kthread_bind(p, cpu);
/* CPU hotplug need to bind once again when unparking the thread. */
- set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
to_kthread(p)->cpu = cpu;
return p;
}
+void kthread_set_per_cpu(struct task_struct *k, int cpu)
+{
+ struct kthread *kthread = to_kthread(k);
+ if (!kthread)
+ return;
+
+ WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
+
+ if (cpu < 0) {
+ clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+ return;
+ }
+
+ kthread->cpu = cpu;
+ set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
+bool kthread_is_per_cpu(struct task_struct *k)
+{
+ struct kthread *kthread = to_kthread(k);
+ if (!kthread)
+ return false;
+
+ return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
/**
* kthread_unpark - unpark a thread created by kthread_create().
* @k: thread created by kthread_create().
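kthread_set_per_cpu()/kthread_is_per_cpu() introduced above are consumed later in this series by the smpboot and workqueue hunks. A condensed sketch of that usage pattern (mirroring those hunks, not additional upstream code):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/cpumask.h>

/* Bind a freshly created worker and mark it as a strict per-CPU kthread. */
static void attach_worker(struct task_struct *tsk, int cpu)
{
	kthread_bind(tsk, cpu);			/* sets PF_NO_SETAFFINITY */
	kthread_set_per_cpu(tsk, cpu);		/* may stay on an online, !active CPU */
}

/* On CPU unplug, drop the per-CPU marking before widening the affinity. */
static void detach_worker(struct task_struct *tsk)
{
	kthread_set_per_cpu(tsk, -1);
	WARN_ON_ONCE(set_cpus_allowed_ptr(tsk, cpu_possible_mask) < 0);
}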
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index c1418b47f625..bdaf4829098c 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -79,7 +79,7 @@ module_param(lock_stat, int, 0644);
DEFINE_PER_CPU(unsigned int, lockdep_recursion);
EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
-static inline bool lockdep_enabled(void)
+static __always_inline bool lockdep_enabled(void)
{
if (!debug_locks)
return false;
@@ -5271,12 +5271,15 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie
/*
* Check whether we follow the irq-flags state precisely:
*/
-static void check_flags(unsigned long flags)
+static noinstr void check_flags(unsigned long flags)
{
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
if (!debug_locks)
return;
+ /* Get the warning out.. */
+ instrumentation_begin();
+
if (irqs_disabled_flags(flags)) {
if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
printk("possible reason: unannotated irqs-off.\n");
@@ -5304,6 +5307,8 @@ static void check_flags(unsigned long flags)
if (!debug_locks)
print_irqtrace_events(current);
+
+ instrumentation_end();
#endif
}
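check_flags() is now noinstr, so the diagnostics it prints have to be bracketed explicitly. This is the usual shape for noinstr functions that still want to call instrumentable helpers on a slow path (a sketch of the general pattern, not additional upstream code):

#include <linux/instrumentation.h>
#include <linux/printk.h>

static noinstr void example_noinstr(void)
{
	/* ... non-instrumentable fast path ... */

	instrumentation_begin();
	printk("slow-path diagnostics are allowed here\n");
	instrumentation_end();
}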
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index ffdd0dc7ec6d..6639a0cfe0ac 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1291,11 +1291,16 @@ static size_t info_print_prefix(const struct printk_info *info, bool syslog,
* done:
*
* - Add prefix for each line.
+ * - Drop truncated lines that no longer fit into the buffer.
* - Add the trailing newline that has been removed in vprintk_store().
- * - Drop truncated lines that do not longer fit into the buffer.
+ * - Add a string terminator.
+ *
+ * Since the produced string is always terminated, the maximum possible
+ * return value is @r->text_buf_size - 1;
*
* Return: The length of the updated/prepared text, including the added
- * prefixes and the newline. The dropped line(s) are not counted.
+ * prefixes and the newline. The terminator is not counted. The dropped
+ * line(s) are not counted.
*/
static size_t record_print_text(struct printk_record *r, bool syslog,
bool time)
@@ -1338,26 +1343,31 @@ static size_t record_print_text(struct printk_record *r, bool syslog,
/*
* Truncate the text if there is not enough space to add the
- * prefix and a trailing newline.
+ * prefix and a trailing newline and a terminator.
*/
- if (len + prefix_len + text_len + 1 > buf_size) {
+ if (len + prefix_len + text_len + 1 + 1 > buf_size) {
/* Drop even the current line if no space. */
- if (len + prefix_len + line_len + 1 > buf_size)
+ if (len + prefix_len + line_len + 1 + 1 > buf_size)
break;
- text_len = buf_size - len - prefix_len - 1;
+ text_len = buf_size - len - prefix_len - 1 - 1;
truncated = true;
}
memmove(text + prefix_len, text, text_len);
memcpy(text, prefix, prefix_len);
+ /*
+ * Increment the prepared length to include the text and
+ * prefix that were just moved+copied. Also increment for the
+ * newline at the end of this line. If this is the last line,
+ * there is no newline, but it will be added immediately below.
+ */
len += prefix_len + line_len + 1;
-
if (text_len == line_len) {
/*
- * Add the trailing newline removed in
- * vprintk_store().
+ * This is the last line. Add the trailing newline
+ * removed in vprintk_store().
*/
text[prefix_len + line_len] = '\n';
break;
@@ -1382,6 +1392,14 @@ static size_t record_print_text(struct printk_record *r, bool syslog,
text_len -= line_len + 1;
}
+ /*
+ * If a buffer was provided, it will be terminated. Space for the
+ * string terminator is guaranteed to be available. The terminator is
+ * not counted in the return value.
+ */
+ if (buf_size > 0)
+ text[len] = 0;
+
return len;
}
@@ -3427,7 +3445,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
while (prb_read_valid_info(prb, seq, &info, &line_count)) {
if (r.info->seq >= dumper->next_seq)
break;
- l += get_record_print_text_size(&info, line_count, true, time);
+ l += get_record_print_text_size(&info, line_count, syslog, time);
seq = r.info->seq + 1;
}
@@ -3437,7 +3455,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
&info, &line_count)) {
if (r.info->seq >= dumper->next_seq)
break;
- l -= get_record_print_text_size(&info, line_count, true, time);
+ l -= get_record_print_text_size(&info, line_count, syslog, time);
seq = r.info->seq + 1;
}
diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c
index 6704f06e0417..8a7b7362c0dd 100644
--- a/kernel/printk/printk_ringbuffer.c
+++ b/kernel/printk/printk_ringbuffer.c
@@ -1718,7 +1718,7 @@ static bool copy_data(struct prb_data_ring *data_ring,
/* Caller interested in the line count? */
if (line_count)
- *line_count = count_lines(data, data_size);
+ *line_count = count_lines(data, len);
/* Caller interested in the data content? */
if (!buf || !buf_size)
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 35bdcfd84d42..36607551f966 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -241,7 +241,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
}
}
-/* Spawn RCU-tasks grace-period kthread, e.g., at core_initcall() time. */
+/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
struct task_struct *t;
@@ -564,7 +564,6 @@ static int __init rcu_spawn_tasks_kthread(void)
rcu_spawn_tasks_kthread_generic(&rcu_tasks);
return 0;
}
-core_initcall(rcu_spawn_tasks_kthread);
#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
@@ -692,7 +691,6 @@ static int __init rcu_spawn_tasks_rude_kthread(void)
rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
return 0;
}
-core_initcall(rcu_spawn_tasks_rude_kthread);
#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_rude_gp_kthread(void)
@@ -968,6 +966,11 @@ static void rcu_tasks_trace_pregp_step(void)
static void rcu_tasks_trace_pertask(struct task_struct *t,
struct list_head *hop)
{
+ // During early boot when there is only the one boot CPU, there
+ // is no idle task for the other CPUs. Just return.
+ if (unlikely(t == NULL))
+ return;
+
WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
WRITE_ONCE(t->trc_reader_checked, false);
t->trc_ipi_to_cpu = -1;
@@ -1193,7 +1196,6 @@ static int __init rcu_spawn_tasks_trace_kthread(void)
rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
return 0;
}
-core_initcall(rcu_spawn_tasks_trace_kthread);
#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_trace_gp_kthread(void)
@@ -1222,6 +1224,21 @@ void show_rcu_tasks_gp_kthreads(void)
}
#endif /* #ifndef CONFIG_TINY_RCU */
+void __init rcu_init_tasks_generic(void)
+{
+#ifdef CONFIG_TASKS_RCU
+ rcu_spawn_tasks_kthread();
+#endif
+
+#ifdef CONFIG_TASKS_RUDE_RCU
+ rcu_spawn_tasks_rude_kthread();
+#endif
+
+#ifdef CONFIG_TASKS_TRACE_RCU
+ rcu_spawn_tasks_trace_kthread();
+#endif
+}
+
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void rcu_tasks_bootup_oddness(void) {}
void show_rcu_tasks_gp_kthreads(void) {}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 15d2562118d1..ff74fca39ed2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1796,13 +1796,28 @@ static inline bool rq_has_pinned_tasks(struct rq *rq)
*/
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
+ /* When not in the task's cpumask, no point in looking further. */
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
return false;
- if (is_per_cpu_kthread(p) || is_migration_disabled(p))
+ /* migrate_disabled() must be allowed to finish. */
+ if (is_migration_disabled(p))
return cpu_online(cpu);
- return cpu_active(cpu);
+ /* Non kernel threads are not allowed during either online or offline. */
+ if (!(p->flags & PF_KTHREAD))
+ return cpu_active(cpu);
+
+ /* KTHREAD_IS_PER_CPU is always allowed. */
+ if (kthread_is_per_cpu(p))
+ return cpu_online(cpu);
+
+ /* Regular kernel threads don't get to stay during offline. */
+ if (cpu_rq(cpu)->balance_push)
+ return false;
+
+ /* But are allowed during online. */
+ return cpu_online(cpu);
}
/*
@@ -2327,7 +2342,9 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
/*
- * Kernel threads are allowed on online && !active CPUs.
+ * Kernel threads are allowed on online && !active CPUs,
+ * however, during cpu-hot-unplug, even these might get pushed
+ * away if not KTHREAD_IS_PER_CPU.
*
* Specifically, migration_disabled() tasks must not fail the
* cpumask_any_and_distribute() pick below, esp. so on
@@ -2371,16 +2388,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
__do_set_cpus_allowed(p, new_mask, flags);
- if (p->flags & PF_KTHREAD) {
- /*
- * For kernel threads that do indeed end up on online &&
- * !active we want to ensure they are strict per-CPU threads.
- */
- WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
- !cpumask_intersects(new_mask, cpu_active_mask) &&
- p->nr_cpus_allowed != 1);
- }
-
return affine_move_task(rq, p, &rf, dest_cpu, flags);
out:
@@ -3122,6 +3129,13 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
static inline bool ttwu_queue_cond(int cpu, int wake_flags)
{
/*
+ * Do not complicate things with the async wake_list while the CPU is
+ * in hotplug state.
+ */
+ if (!cpu_active(cpu))
+ return false;
+
+ /*
* If the CPU does not share cache, then queue the task on the
* remote rqs wakelist to avoid accessing remote data.
*/
@@ -7276,8 +7290,14 @@ static void balance_push(struct rq *rq)
/*
* Both the cpu-hotplug and stop task are in this case and are
* required to complete the hotplug process.
+ *
+ * XXX: the idle task does not match kthread_is_per_cpu() due to
+ * histerical raisins.
*/
- if (is_per_cpu_kthread(push_task) || is_migration_disabled(push_task)) {
+ if (rq->idle == push_task ||
+ ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
+ is_migration_disabled(push_task)) {
+
/*
* If this is the idle task on the outgoing CPU try to wake
* up the hotplug control thread which might wait for the
@@ -7309,7 +7329,7 @@ static void balance_push(struct rq *rq)
/*
* At this point need_resched() is true and we'll take the loop in
* schedule(). The next pick is obviously going to be the stop task
- * which is_per_cpu_kthread() and will push this task away.
+ * which kthread_is_per_cpu() and will push this task away.
*/
raw_spin_lock(&rq->lock);
}
@@ -7320,10 +7340,13 @@ static void balance_push_set(int cpu, bool on)
struct rq_flags rf;
rq_lock_irqsave(rq, &rf);
- if (on)
+ rq->balance_push = on;
+ if (on) {
+ WARN_ON_ONCE(rq->balance_callback);
rq->balance_callback = &balance_push_callback;
- else
+ } else if (rq->balance_callback == &balance_push_callback) {
rq->balance_callback = NULL;
+ }
rq_unlock_irqrestore(rq, &rf);
}
@@ -7441,6 +7464,10 @@ int sched_cpu_activate(unsigned int cpu)
struct rq *rq = cpu_rq(cpu);
struct rq_flags rf;
+ /*
+ * Make sure that when the hotplug state machine does a roll-back
+ * we clear balance_push. Ideally that would happen earlier...
+ */
balance_push_set(cpu, false);
#ifdef CONFIG_SCHED_SMT
@@ -7483,17 +7510,27 @@ int sched_cpu_deactivate(unsigned int cpu)
int ret;
set_cpu_active(cpu, false);
+
+ /*
+ * From this point forward, this CPU will refuse to run any task that
+ * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
+ * push those tasks away until this gets cleared, see
+ * sched_cpu_dying().
+ */
+ balance_push_set(cpu, true);
+
/*
- * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
- * users of this state to go away such that all new such users will
- * observe it.
+ * We've cleared cpu_active_mask / set balance_push, wait for all
+ * preempt-disabled and RCU users of this state to go away such that
+ * all new such users will observe it.
+ *
+ * Specifically, we rely on ttwu to no longer target this CPU, see
+ * ttwu_queue_cond() and is_cpu_allowed().
*
* Do sync before park smpboot threads to take care the rcu boost case.
*/
synchronize_rcu();
- balance_push_set(cpu, true);
-
rq_lock_irqsave(rq, &rf);
if (rq->rd) {
update_rq_clock(rq);
@@ -7574,6 +7611,25 @@ static void calc_load_migrate(struct rq *rq)
atomic_long_add(delta, &calc_load_tasks);
}
+static void dump_rq_tasks(struct rq *rq, const char *loglvl)
+{
+ struct task_struct *g, *p;
+ int cpu = cpu_of(rq);
+
+ lockdep_assert_held(&rq->lock);
+
+ printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
+ for_each_process_thread(g, p) {
+ if (task_cpu(p) != cpu)
+ continue;
+
+ if (!task_on_rq_queued(p))
+ continue;
+
+ printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
+ }
+}
+
int sched_cpu_dying(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -7583,9 +7639,18 @@ int sched_cpu_dying(unsigned int cpu)
sched_tick_stop(cpu);
rq_lock_irqsave(rq, &rf);
- BUG_ON(rq->nr_running != 1 || rq_has_pinned_tasks(rq));
+ if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
+ WARN(true, "Dying CPU not properly vacated!");
+ dump_rq_tasks(rq, KERN_WARNING);
+ }
rq_unlock_irqrestore(rq, &rf);
+ /*
+ * Now that the CPU is offline, make sure we're welcome
+ * to new tasks once we come back up.
+ */
+ balance_push_set(cpu, false);
+
calc_load_migrate(rq);
update_max_interval();
nohz_balance_exit_idle(rq);
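Pulling together the is_cpu_allowed() rewrite near the top of this file's diff, the placement rules during hotplug now come down to (informal summary of the code above):

	task kind                        may run on
	---------                        ----------
	migration-disabled task          any online CPU (must be allowed to finish)
	user task (!PF_KTHREAD)          active CPUs only
	KTHREAD_IS_PER_CPU kthread       any online CPU
	other kthreads                   online CPUs, unless the rq has
	                                 balance_push set (CPU on its way down)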
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 12ada79d40f3..bb09988451a0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -975,6 +975,7 @@ struct rq {
unsigned long cpu_capacity_orig;
struct callback_head *balance_callback;
+ unsigned char balance_push;
unsigned char nohz_idle_balance;
unsigned char idle_balance;
diff --git a/kernel/signal.c b/kernel/signal.c
index 5736c55aaa1a..5ad8566534e7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2550,6 +2550,9 @@ bool get_signal(struct ksignal *ksig)
struct signal_struct *signal = current->signal;
int signr;
+ if (unlikely(current->task_works))
+ task_work_run();
+
/*
* For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
* that the arch handlers don't all have to do it. If we get here
@@ -3701,7 +3704,8 @@ static bool access_pidfd_pidns(struct pid *pid)
return true;
}
-static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
+static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
+ siginfo_t __user *info)
{
#ifdef CONFIG_COMPAT
/*
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 2efe1e206167..f25208e8df83 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -188,6 +188,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
kfree(td);
return PTR_ERR(tsk);
}
+ kthread_set_per_cpu(tsk, cpu);
/*
* Park the thread so that it could start right on the CPU
* when it is available.
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 7404d3831527..87389b9e21ab 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -498,7 +498,7 @@ out:
static void sync_hw_clock(struct work_struct *work);
static DECLARE_WORK(sync_work, sync_hw_clock);
static struct hrtimer sync_hrtimer;
-#define SYNC_PERIOD_NS (11UL * 60 * NSEC_PER_SEC)
+#define SYNC_PERIOD_NS (11ULL * 60 * NSEC_PER_SEC)
static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer)
{
@@ -512,7 +512,7 @@ static void sched_sync_hw_clock(unsigned long offset_nsec, bool retry)
ktime_t exp = ktime_set(ktime_get_real_seconds(), 0);
if (retry)
- exp = ktime_add_ns(exp, 2 * NSEC_PER_SEC - offset_nsec);
+ exp = ktime_add_ns(exp, 2ULL * NSEC_PER_SEC - offset_nsec);
else
exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec);
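For context on the UL to ULL change above: 11 * 60 * NSEC_PER_SEC is 660,000,000,000 ns, which does not fit in a 32-bit unsigned long (maximum 4,294,967,295), so on 32-bit builds the constant has to be formed in a 64-bit type; the retry offset in the neighbouring hunk is, presumably for the same reason, widened to ULL as well.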
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index a45cedda93a7..6aee5768c86f 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -991,8 +991,7 @@ EXPORT_SYMBOL_GPL(ktime_get_seconds);
/**
* ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
*
- * Returns the wall clock seconds since 1970. This replaces the
- * get_seconds() interface which is not y2038 safe on 32bit systems.
+ * Returns the wall clock seconds since 1970.
*
* For 64bit systems the fast access to tk->xtime_sec is preserved. On
* 32bit systems the access must be protected with the sequence
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index d5a19413d4f8..c1a62ae7e812 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -538,7 +538,7 @@ config KPROBE_EVENTS
config KPROBE_EVENTS_ON_NOTRACE
bool "Do NOT protect notrace function from kprobe events"
depends on KPROBE_EVENTS
- depends on KPROBES_ON_FTRACE
+ depends on DYNAMIC_FTRACE
default n
help
This is only for the developers who want to debug ftrace itself
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 9c31f42245e9..e6fba1798771 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -434,7 +434,7 @@ static int disable_trace_kprobe(struct trace_event_call *call,
return 0;
}
-#if defined(CONFIG_KPROBES_ON_FTRACE) && \
+#if defined(CONFIG_DYNAMIC_FTRACE) && \
!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool __within_notrace_func(unsigned long addr)
{
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b5295a0b0536..894bb885b40b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1849,18 +1849,17 @@ static void worker_attach_to_pool(struct worker *worker,
mutex_lock(&wq_pool_attach_mutex);
/*
- * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
- * online CPUs. It'll be re-applied when any of the CPUs come up.
- */
- set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
-
- /*
* The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
* stable across this function. See the comments above the flag
* definition for details.
*/
if (pool->flags & POOL_DISASSOCIATED)
worker->flags |= WORKER_UNBOUND;
+ else
+ kthread_set_per_cpu(worker->task, pool->cpu);
+
+ if (worker->rescue_wq)
+ set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
list_add_tail(&worker->node, &pool->workers);
worker->pool = pool;
@@ -1883,6 +1882,7 @@ static void worker_detach_from_pool(struct worker *worker)
mutex_lock(&wq_pool_attach_mutex);
+ kthread_set_per_cpu(worker->task, -1);
list_del(&worker->node);
worker->pool = NULL;
@@ -3731,17 +3731,24 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
* is updated and visible.
*/
if (!freezable || !workqueue_freezing) {
+ bool kick = false;
+
pwq->max_active = wq->saved_max_active;
while (!list_empty(&pwq->delayed_works) &&
- pwq->nr_active < pwq->max_active)
+ pwq->nr_active < pwq->max_active) {
pwq_activate_first_delayed(pwq);
+ kick = true;
+ }
/*
* Need to kick a worker after thawed or an unbound wq's
- * max_active is bumped. It's a slow path. Do it always.
+ * max_active is bumped. In realtime scenarios, always kicking a
+ * worker will cause interference on the isolated cpu cores, so
+ * let's kick iff work items were activated.
*/
- wake_up_worker(pwq->pool);
+ if (kick)
+ wake_up_worker(pwq->pool);
} else {
pwq->max_active = 0;
}
@@ -4912,8 +4919,10 @@ static void unbind_workers(int cpu)
raw_spin_unlock_irq(&pool->lock);
- for_each_pool_worker(worker, pool)
- WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_active_mask) < 0);
+ for_each_pool_worker(worker, pool) {
+ kthread_set_per_cpu(worker->task, -1);
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+ }
mutex_unlock(&wq_pool_attach_mutex);
@@ -4965,9 +4974,11 @@ static void rebind_workers(struct worker_pool *pool)
* of all workers first and then clear UNBOUND. As we're called
* from CPU_ONLINE, the following shouldn't fail.
*/
- for_each_pool_worker(worker, pool)
+ for_each_pool_worker(worker, pool) {
+ kthread_set_per_cpu(worker->task, pool->cpu);
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
pool->attrs->cpumask) < 0);
+ }
raw_spin_lock_irq(&pool->lock);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e6e58b26e888..7937265ef879 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -295,14 +295,6 @@ config GDB_SCRIPTS
endif # DEBUG_INFO
-config ENABLE_MUST_CHECK
- bool "Enable __must_check logic"
- default y
- help
- Enable the __must_check logic in the kernel build. Disable this to
- suppress the "warning: ignoring return value of 'foo', declared with
- attribute warn_unused_result" messages.
-
config FRAME_WARN
int "Warn for stack frames larger than"
range 0 8192
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 8b635fd75fe4..3a0b1c930733 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -123,6 +123,7 @@ config UBSAN_SIGNED_OVERFLOW
config UBSAN_UNSIGNED_OVERFLOW
bool "Perform checking for unsigned arithmetic overflow"
depends on $(cc-option,-fsanitize=unsigned-integer-overflow)
+ depends on !X86_32 # avoid excessive stack usage on x86-32/clang
help
This option enables -fsanitize=unsigned-integer-overflow which checks
for overflow of any arithmetic operations with unsigned integers. This
diff --git a/lib/fonts/font_ter16x32.c b/lib/fonts/font_ter16x32.c
index 1955d624177c..5baedc573dd6 100644
--- a/lib/fonts/font_ter16x32.c
+++ b/lib/fonts/font_ter16x32.c
@@ -774,8 +774,8 @@ static const struct font_data fontdata_ter16x32 = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 95 */
- 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00,
- 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1169,7 +1169,7 @@ static const struct font_data fontdata_ter16x32 = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x7f, 0xf8, 0x7f, 0xfc, 0x03, 0x9e, 0x03, 0x8e,
+ 0x7e, 0xf8, 0x7f, 0xfc, 0x03, 0x9e, 0x03, 0x8e,
0x03, 0x8e, 0x3f, 0x8e, 0x7f, 0xfe, 0xf3, 0xfe,
0xe3, 0x80, 0xe3, 0x80, 0xe3, 0x80, 0xf3, 0xce,
0x7f, 0xfe, 0x3e, 0xfc, 0x00, 0x00, 0x00, 0x00,
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 7f1244b5294a..dab97bb69df6 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -81,14 +81,14 @@ static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
* users set the same bit, one user will return remain bits, otherwise
* return 0.
*/
-static int bitmap_set_ll(unsigned long *map, int start, int nr)
+static int bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
unsigned long *p = map + BIT_WORD(start);
- const int size = start + nr;
+ const unsigned long size = start + nr;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
- while (nr - bits_to_set >= 0) {
+ while (nr >= bits_to_set) {
if (set_bits_ll(p, mask_to_set))
return nr;
nr -= bits_to_set;
@@ -116,14 +116,15 @@ static int bitmap_set_ll(unsigned long *map, int start, int nr)
* users clear the same bit, one user will return remain bits,
* otherwise return 0.
*/
-static int bitmap_clear_ll(unsigned long *map, int start, int nr)
+static unsigned long
+bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
unsigned long *p = map + BIT_WORD(start);
- const int size = start + nr;
+ const unsigned long size = start + nr;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
- while (nr - bits_to_clear >= 0) {
+ while (nr >= bits_to_clear) {
if (clear_bits_ll(p, mask_to_clear))
return nr;
nr -= bits_to_clear;
@@ -183,8 +184,8 @@ int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t ph
size_t size, int nid, void *owner)
{
struct gen_pool_chunk *chunk;
- int nbits = size >> pool->min_alloc_order;
- int nbytes = sizeof(struct gen_pool_chunk) +
+ unsigned long nbits = size >> pool->min_alloc_order;
+ unsigned long nbytes = sizeof(struct gen_pool_chunk) +
BITS_TO_LONGS(nbits) * sizeof(long);
chunk = vzalloc_node(nbytes, nid);
@@ -242,7 +243,7 @@ void gen_pool_destroy(struct gen_pool *pool)
struct list_head *_chunk, *_next_chunk;
struct gen_pool_chunk *chunk;
int order = pool->min_alloc_order;
- int bit, end_bit;
+ unsigned long bit, end_bit;
list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
@@ -278,7 +279,7 @@ unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
struct gen_pool_chunk *chunk;
unsigned long addr = 0;
int order = pool->min_alloc_order;
- int nbits, start_bit, end_bit, remain;
+ unsigned long nbits, start_bit, end_bit, remain;
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
@@ -487,7 +488,7 @@ void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
{
struct gen_pool_chunk *chunk;
int order = pool->min_alloc_order;
- int start_bit, nbits, remain;
+ unsigned long start_bit, nbits, remain;
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
@@ -755,7 +756,7 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
index = bitmap_find_next_zero_area(map, size, start, nr, 0);
while (index < size) {
- int next_bit = find_next_bit(map, size, index + nr);
+ unsigned long next_bit = find_next_bit(map, size, index + nr);
if ((next_bit - index) < len) {
len = next_bit - index;
start_bit = index;
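Two things change together in the genalloc hunks above: the bit counts become unsigned long so very large chunks no longer overflow the bit arithmetic, and the loop guards are rewritten, because with unsigned operands the old subtraction test can never be false. A tiny standalone illustration of the latter (user-space, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long nr = 3, bits_to_set = 32;

	/* Old guard: with unsigned types the subtraction wraps around, so the
	 * "remaining >= 0" test is always true and the loop would overrun. */
	printf("old guard (nr - bits_to_set >= 0): %d\n", nr - bits_to_set >= 0);

	/* New guard used by the patch: correctly false once nr < bits_to_set. */
	printf("new guard (nr >= bits_to_set):     %d\n", nr >= bits_to_set);
	return 0;
}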
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 1635111c5bd2..a21e6a5792c5 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1658,7 +1658,7 @@ static int copy_compat_iovec_from_user(struct iovec *iov,
(const struct compat_iovec __user *)uvec;
int ret = -EFAULT, i;
- if (!user_access_begin(uvec, nr_segs * sizeof(*uvec)))
+ if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
return -EFAULT;
for (i = 0; i < nr_segs; i++) {
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index b4c0df6d706d..c770570bfe4f 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -48,7 +48,7 @@ endif
endif
quiet_cmd_unroll = UNROLL $@
- cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$* < $< > $@
+ cmd_unroll = $(AWK) -v N=$* -f $(srctree)/$(src)/unroll.awk < $< > $@
targets += int1.c int2.c int4.c int8.c int16.c int32.c
$(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
diff --git a/lib/zlib_dfltcc/Makefile b/lib/zlib_dfltcc/Makefile
index 8e4d5afbbb10..66e1c96387c4 100644
--- a/lib/zlib_dfltcc/Makefile
+++ b/lib/zlib_dfltcc/Makefile
@@ -8,4 +8,4 @@
obj-$(CONFIG_ZLIB_DFLTCC) += zlib_dfltcc.o
-zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o dfltcc_syms.o
+zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o
diff --git a/lib/zlib_dfltcc/dfltcc.c b/lib/zlib_dfltcc/dfltcc.c
index c30de430b30c..782f76e9d4da 100644
--- a/lib/zlib_dfltcc/dfltcc.c
+++ b/lib/zlib_dfltcc/dfltcc.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: Zlib
/* dfltcc.c - SystemZ DEFLATE CONVERSION CALL support. */
-#include <linux/zutil.h>
+#include <linux/export.h>
+#include <linux/module.h>
#include "dfltcc_util.h"
#include "dfltcc.h"
@@ -53,3 +54,6 @@ void dfltcc_reset(
dfltcc_state->dht_threshold = DFLTCC_DHT_MIN_SAMPLE_SIZE;
dfltcc_state->param.ribm = DFLTCC_RIBM;
}
+EXPORT_SYMBOL(dfltcc_reset);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/zlib_dfltcc/dfltcc_deflate.c b/lib/zlib_dfltcc/dfltcc_deflate.c
index 00c185101c6d..6c946e8532ee 100644
--- a/lib/zlib_dfltcc/dfltcc_deflate.c
+++ b/lib/zlib_dfltcc/dfltcc_deflate.c
@@ -4,6 +4,7 @@
#include "dfltcc_util.h"
#include "dfltcc.h"
#include <asm/setup.h>
+#include <linux/export.h>
#include <linux/zutil.h>
/*
@@ -34,6 +35,7 @@ int dfltcc_can_deflate(
return 1;
}
+EXPORT_SYMBOL(dfltcc_can_deflate);
static void dfltcc_gdht(
z_streamp strm
@@ -277,3 +279,4 @@ again:
goto again; /* deflate() must use all input or all output */
return 1;
}
+EXPORT_SYMBOL(dfltcc_deflate);
diff --git a/lib/zlib_dfltcc/dfltcc_inflate.c b/lib/zlib_dfltcc/dfltcc_inflate.c
index db107016d29b..fb60b5a6a1cb 100644
--- a/lib/zlib_dfltcc/dfltcc_inflate.c
+++ b/lib/zlib_dfltcc/dfltcc_inflate.c
@@ -125,7 +125,7 @@ dfltcc_inflate_action dfltcc_inflate(
param->ho = (state->write - state->whave) & ((1 << HB_BITS) - 1);
if (param->hl)
param->nt = 0; /* Honor history for the first block */
- param->cv = state->flags ? REVERSE(state->check) : state->check;
+ param->cv = state->check;
/* Inflate */
do {
@@ -138,7 +138,7 @@ dfltcc_inflate_action dfltcc_inflate(
state->bits = param->sbb;
state->whave = param->hl;
state->write = (param->ho + param->hl) & ((1 << HB_BITS) - 1);
- state->check = state->flags ? REVERSE(param->cv) : param->cv;
+ state->check = param->cv;
if (cc == DFLTCC_CC_OP2_CORRUPT && param->oesc != 0) {
/* Report an error if stream is corrupted */
state->mode = BAD;
diff --git a/lib/zlib_dfltcc/dfltcc_syms.c b/lib/zlib_dfltcc/dfltcc_syms.c
deleted file mode 100644
index 6f23481804c1..000000000000
--- a/lib/zlib_dfltcc/dfltcc_syms.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/lib/zlib_dfltcc/dfltcc_syms.c
- *
- * Exported symbols for the s390 zlib dfltcc support.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/zlib.h>
-#include "dfltcc.h"
-
-EXPORT_SYMBOL(dfltcc_can_deflate);
-EXPORT_SYMBOL(dfltcc_deflate);
-EXPORT_SYMBOL(dfltcc_reset);
-MODULE_LICENSE("GPL");
diff --git a/mm/highmem.c b/mm/highmem.c
index c3a9ea7875ef..874b732b120c 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -473,6 +473,11 @@ static inline void *arch_kmap_local_high_get(struct page *page)
}
#endif
+#ifndef arch_kmap_local_set_pte
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) \
+ set_pte_at(mm, vaddr, ptep, ptev)
+#endif
+
/* Unmap a local mapping which was obtained by kmap_high_get() */
static inline bool kmap_high_unmap_local(unsigned long vaddr)
{
@@ -515,7 +520,7 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte - idx)));
pteval = pfn_pte(pfn, prot);
- set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
+ arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
arch_kmap_local_post_map(vaddr, pteval);
current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
preempt_enable();
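The new arch_kmap_local_set_pte() hook lets an architecture substitute its own PTE installer for kmap_local while everyone else keeps the set_pte_at() default shown above. A hedged sketch of an override, typically provided by the architecture's own highmem header (the placeholder comment stands in for whatever arch-specific fixup is actually needed):

#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)		\
	do {							\
		/* placeholder: arch-specific fixups go here */	\
		set_pte_at(mm, vaddr, ptep, ptev);		\
	} while (0)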
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cbf32d2824fd..18f6ee317900 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4105,10 +4105,30 @@ retry_avoidcopy:
* may get SIGKILLed if it later faults.
*/
if (outside_reserve) {
+ struct address_space *mapping = vma->vm_file->f_mapping;
+ pgoff_t idx;
+ u32 hash;
+
put_page(old_page);
BUG_ON(huge_pte_none(pte));
+ /*
+ * Drop hugetlb_fault_mutex and i_mmap_rwsem before
+ * unmapping. unmapping needs to hold i_mmap_rwsem
+ * in write mode. Dropping i_mmap_rwsem in read mode
+ * here is OK as COW mappings do not interact with
+ * PMD sharing.
+ *
+ * Reacquire both after unmap operation.
+ */
+ idx = vma_hugecache_offset(h, vma, haddr);
+ hash = hugetlb_fault_mutex_hash(mapping, idx);
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ i_mmap_unlock_read(mapping);
+
unmap_ref_private(mm, vma, old_page, haddr);
- BUG_ON(huge_pte_none(pte));
+
+ i_mmap_lock_read(mapping);
+ mutex_lock(&hugetlb_fault_mutex_table[hash]);
spin_lock(ptl);
ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
if (likely(ptep &&
@@ -4351,7 +4371,7 @@ retry:
* So we need to block hugepage fault by PG_hwpoison bit check.
*/
if (unlikely(PageHWPoison(page))) {
- ret = VM_FAULT_HWPOISON |
+ ret = VM_FAULT_HWPOISON_LARGE |
VM_FAULT_SET_HINDEX(hstate_index(h));
goto backout_unlocked;
}
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 1dd5a0f99372..5106b84b07d4 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -337,6 +337,8 @@ void kasan_record_aux_stack(void *addr)
cache = page->slab_cache;
object = nearest_obj(cache, page, addr);
alloc_meta = kasan_get_alloc_meta(cache, object);
+ if (!alloc_meta)
+ return;
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 55bd6f09c70f..e529428e7a11 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -19,11 +19,10 @@
#include "kasan.h"
-enum kasan_arg_mode {
- KASAN_ARG_MODE_DEFAULT,
- KASAN_ARG_MODE_OFF,
- KASAN_ARG_MODE_PROD,
- KASAN_ARG_MODE_FULL,
+enum kasan_arg {
+ KASAN_ARG_DEFAULT,
+ KASAN_ARG_OFF,
+ KASAN_ARG_ON,
};
enum kasan_arg_stacktrace {
@@ -38,7 +37,7 @@ enum kasan_arg_fault {
KASAN_ARG_FAULT_PANIC,
};
-static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
+static enum kasan_arg kasan_arg __ro_after_init;
static enum kasan_arg_stacktrace kasan_arg_stacktrace __ro_after_init;
static enum kasan_arg_fault kasan_arg_fault __ro_after_init;
@@ -52,26 +51,24 @@ DEFINE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
/* Whether panic or disable tag checking on fault. */
bool kasan_flag_panic __ro_after_init;
-/* kasan.mode=off/prod/full */
-static int __init early_kasan_mode(char *arg)
+/* kasan=off/on */
+static int __init early_kasan_flag(char *arg)
{
if (!arg)
return -EINVAL;
if (!strcmp(arg, "off"))
- kasan_arg_mode = KASAN_ARG_MODE_OFF;
- else if (!strcmp(arg, "prod"))
- kasan_arg_mode = KASAN_ARG_MODE_PROD;
- else if (!strcmp(arg, "full"))
- kasan_arg_mode = KASAN_ARG_MODE_FULL;
+ kasan_arg = KASAN_ARG_OFF;
+ else if (!strcmp(arg, "on"))
+ kasan_arg = KASAN_ARG_ON;
else
return -EINVAL;
return 0;
}
-early_param("kasan.mode", early_kasan_mode);
+early_param("kasan", early_kasan_flag);
-/* kasan.stack=off/on */
+/* kasan.stacktrace=off/on */
static int __init early_kasan_flag_stacktrace(char *arg)
{
if (!arg)
@@ -113,8 +110,8 @@ void kasan_init_hw_tags_cpu(void)
* as this function is only called for MTE-capable hardware.
*/
- /* If KASAN is disabled, do nothing. */
- if (kasan_arg_mode == KASAN_ARG_MODE_OFF)
+ /* If KASAN is disabled via command line, don't initialize it. */
+ if (kasan_arg == KASAN_ARG_OFF)
return;
hw_init_tags(KASAN_TAG_MAX);
@@ -124,43 +121,28 @@ void kasan_init_hw_tags_cpu(void)
/* kasan_init_hw_tags() is called once on boot CPU. */
void __init kasan_init_hw_tags(void)
{
- /* If hardware doesn't support MTE, do nothing. */
+ /* If hardware doesn't support MTE, don't initialize KASAN. */
if (!system_supports_mte())
return;
- /* Choose KASAN mode if kasan boot parameter is not provided. */
- if (kasan_arg_mode == KASAN_ARG_MODE_DEFAULT) {
- if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
- kasan_arg_mode = KASAN_ARG_MODE_FULL;
- else
- kasan_arg_mode = KASAN_ARG_MODE_PROD;
- }
-
- /* Preset parameter values based on the mode. */
- switch (kasan_arg_mode) {
- case KASAN_ARG_MODE_DEFAULT:
- /* Shouldn't happen as per the check above. */
- WARN_ON(1);
- return;
- case KASAN_ARG_MODE_OFF:
- /* If KASAN is disabled, do nothing. */
+ /* If KASAN is disabled via command line, don't initialize it. */
+ if (kasan_arg == KASAN_ARG_OFF)
return;
- case KASAN_ARG_MODE_PROD:
- static_branch_enable(&kasan_flag_enabled);
- break;
- case KASAN_ARG_MODE_FULL:
- static_branch_enable(&kasan_flag_enabled);
- static_branch_enable(&kasan_flag_stacktrace);
- break;
- }
- /* Now, optionally override the presets. */
+ /* Enable KASAN. */
+ static_branch_enable(&kasan_flag_enabled);
switch (kasan_arg_stacktrace) {
case KASAN_ARG_STACKTRACE_DEFAULT:
+ /*
+ * Default to enabling stack trace collection for
+ * debug kernels.
+ */
+ if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
+ static_branch_enable(&kasan_flag_stacktrace);
break;
case KASAN_ARG_STACKTRACE_OFF:
- static_branch_disable(&kasan_flag_stacktrace);
+ /* Do nothing, kasan_flag_stacktrace keeps its default value. */
break;
case KASAN_ARG_STACKTRACE_ON:
static_branch_enable(&kasan_flag_stacktrace);
@@ -169,11 +151,16 @@ void __init kasan_init_hw_tags(void)
switch (kasan_arg_fault) {
case KASAN_ARG_FAULT_DEFAULT:
+ /*
+ * Default to no panic on report.
+ * Do nothing, kasan_flag_panic keeps its default value.
+ */
break;
case KASAN_ARG_FAULT_REPORT:
- kasan_flag_panic = false;
+ /* Do nothing, kasan_flag_panic keeps its default value. */
break;
case KASAN_ARG_FAULT_PANIC:
+ /* Enable panic on report. */
kasan_flag_panic = true;
break;
}
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index bc0ad208b3a7..c4605ac9837b 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -64,7 +64,8 @@ static inline bool kasan_pmd_table(pud_t pud)
return false;
}
#endif
-pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;
+pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+ __page_aligned_bss;
static inline bool kasan_pte_table(pmd_t pmd)
{
@@ -372,9 +373,10 @@ static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
if (kasan_pte_table(*pmd)) {
if (IS_ALIGNED(addr, PMD_SIZE) &&
- IS_ALIGNED(next, PMD_SIZE))
+ IS_ALIGNED(next, PMD_SIZE)) {
pmd_clear(pmd);
- continue;
+ continue;
+ }
}
pte = pte_offset_kernel(pmd, addr);
kasan_remove_pte_table(pte, addr, next);
@@ -397,9 +399,10 @@ static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
if (kasan_pmd_table(*pud)) {
if (IS_ALIGNED(addr, PUD_SIZE) &&
- IS_ALIGNED(next, PUD_SIZE))
+ IS_ALIGNED(next, PUD_SIZE)) {
pud_clear(pud);
- continue;
+ continue;
+ }
}
pmd = pmd_offset(pud, addr);
pmd_base = pmd_offset(pud, 0);
@@ -423,9 +426,10 @@ static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
if (kasan_pud_table(*p4d)) {
if (IS_ALIGNED(addr, P4D_SIZE) &&
- IS_ALIGNED(next, P4D_SIZE))
+ IS_ALIGNED(next, P4D_SIZE)) {
p4d_clear(p4d);
- continue;
+ continue;
+ }
}
pud = pud_offset(p4d, addr);
kasan_remove_pud_table(pud, addr, next);
@@ -456,9 +460,10 @@ void kasan_remove_zero_shadow(void *start, unsigned long size)
if (kasan_p4d_table(*pgd)) {
if (IS_ALIGNED(addr, PGDIR_SIZE) &&
- IS_ALIGNED(next, PGDIR_SIZE))
+ IS_ALIGNED(next, PGDIR_SIZE)) {
pgd_clear(pgd);
- continue;
+ continue;
+ }
}
p4d = p4d_offset(pgd, addr);
@@ -481,7 +486,6 @@ int kasan_add_zero_shadow(void *start, unsigned long size)
ret = kasan_populate_early_shadow(shadow_start, shadow_end);
if (ret)
- kasan_remove_zero_shadow(shadow_start,
- size >> KASAN_SHADOW_SCALE_SHIFT);
+ kasan_remove_zero_shadow(start, size);
return ret;
}
diff --git a/mm/memblock.c b/mm/memblock.c
index d24bcfa88d2f..1eaaec1e7687 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1427,7 +1427,7 @@ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
}
/**
- * memblock_phys_alloc_try_nid - allocate a memory block from specified MUMA node
+ * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
* @size: size of memory block to be allocated in bytes
* @align: alignment of the region and block's size
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 605f671203ef..e2de77b5bcc2 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3115,9 +3115,7 @@ void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
page_counter_uncharge(&memcg->kmem, nr_pages);
- page_counter_uncharge(&memcg->memory, nr_pages);
- if (do_memsw_account())
- page_counter_uncharge(&memcg->memsw, nr_pages);
+ refill_stock(memcg, nr_pages);
}
/**
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 5a38e9eade94..e9481632fcd1 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1885,6 +1885,12 @@ static int soft_offline_free_page(struct page *page)
return rc;
}
+static void put_ref_page(struct page *page)
+{
+ if (page)
+ put_page(page);
+}
+
/**
* soft_offline_page - Soft offline a page.
* @pfn: pfn to soft-offline
@@ -1910,20 +1916,26 @@ static int soft_offline_free_page(struct page *page)
int soft_offline_page(unsigned long pfn, int flags)
{
int ret;
- struct page *page;
bool try_again = true;
+ struct page *page, *ref_page = NULL;
+
+ WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));
if (!pfn_valid(pfn))
return -ENXIO;
+ if (flags & MF_COUNT_INCREASED)
+ ref_page = pfn_to_page(pfn);
+
/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
page = pfn_to_online_page(pfn);
- if (!page)
+ if (!page) {
+ put_ref_page(ref_page);
return -EIO;
+ }
if (PageHWPoison(page)) {
pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
- if (flags & MF_COUNT_INCREASED)
- put_page(page);
+ put_ref_page(ref_page);
return 0;
}
@@ -1940,7 +1952,7 @@ retry:
goto retry;
}
} else if (ret == -EIO) {
- pr_info("%s: %#lx: unknown page type: %lx (%pGP)\n",
+ pr_info("%s: %#lx: unknown page type: %lx (%pGp)\n",
__func__, pfn, page->flags, &page->flags);
}
diff --git a/mm/memory.c b/mm/memory.c
index 7d608765932b..feff48e1465a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2892,11 +2892,13 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
entry = mk_pte(new_page, vma->vm_page_prot);
entry = pte_sw_mkyoung(entry);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+
/*
* Clear the pte entry and flush it first, before updating the
- * pte with the new entry. This will avoid a race condition
- * seen in the presence of one thread doing SMC and another
- * thread doing COW.
+ * pte with the new entry, to keep TLBs on different CPUs in
+ * sync. This code used to set the new PTE then flush TLBs, but
+ * that left a window where the new PTE could be loaded into
+ * some TLBs while the old PTE remains in others.
*/
ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
page_add_new_anon_rmap(new_page, vma, vmf->address, false);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index af41fb990820..f9d57b9be8c7 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -713,7 +713,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
* expects the zone spans the pfn range. All the pages in the range
* are reserved so nobody should be touching them so we should be safe
*/
- memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
+ memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, 0,
MEMINIT_HOTPLUG, altmap, migratetype);
set_zone_contiguous(zone);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8cf96bd21341..2c3a86502053 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1111,7 +1111,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
const nodemask_t *to, int flags)
{
int busy = 0;
- int err;
+ int err = 0;
nodemask_t tmp;
migrate_prep();
diff --git a/mm/migrate.c b/mm/migrate.c
index ee5e612b4cd8..c0efe921bca5 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -402,6 +402,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
struct zone *oldzone, *newzone;
int dirty;
int expected_count = expected_page_refs(mapping, page) + extra_count;
+ int nr = thp_nr_pages(page);
if (!mapping) {
/* Anonymous page without mapping */
@@ -437,7 +438,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
*/
newpage->index = page->index;
newpage->mapping = page->mapping;
- page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
+ page_ref_add(newpage, nr); /* add cache reference */
if (PageSwapBacked(page)) {
__SetPageSwapBacked(newpage);
if (PageSwapCache(page)) {
@@ -459,7 +460,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
if (PageTransHuge(page)) {
int i;
- for (i = 1; i < HPAGE_PMD_NR; i++) {
+ for (i = 1; i < nr; i++) {
xas_next(&xas);
xas_store(&xas, newpage);
}
@@ -470,7 +471,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
* to one less reference.
* We know this isn't the last reference.
*/
- page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
+ page_ref_unfreeze(page, expected_count - nr);
xas_unlock(&xas);
/* Leave irq disabled to prevent preemption while updating stats */
@@ -493,17 +494,17 @@ int migrate_page_move_mapping(struct address_space *mapping,
old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
- __dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
- __inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
+ __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+ __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
if (PageSwapBacked(page) && !PageSwapCache(page)) {
- __dec_lruvec_state(old_lruvec, NR_SHMEM);
- __inc_lruvec_state(new_lruvec, NR_SHMEM);
+ __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+ __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
}
if (dirty && mapping_can_writeback(mapping)) {
- __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
- __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
- __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
- __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
+ __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+ __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
+ __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+ __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
}
}
local_irq_enable();
diff --git a/mm/mremap.c b/mm/mremap.c
index c5590afe7165..f554320281cc 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -358,7 +358,9 @@ static unsigned long get_extent(enum pgt_entry entry, unsigned long old_addr,
next = (old_addr + size) & mask;
/* even if next overflowed, extent below will be ok */
- extent = (next > old_end) ? old_end - old_addr : next - old_addr;
+ extent = next - old_addr;
+ if (extent > old_end - old_addr)
+ extent = old_end - old_addr;
next = (new_addr + size) & mask;
if (extent > next - new_addr)
extent = next - new_addr;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 586042472ac9..eb34d204d4ee 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2826,7 +2826,7 @@ EXPORT_SYMBOL(__test_set_page_writeback);
*/
void wait_on_page_writeback(struct page *page)
{
- if (PageWriteback(page)) {
+ while (PageWriteback(page)) {
trace_wait_on_page_writeback(page, page_mapping(page));
wait_on_page_bit(page, PG_writeback);
}
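
The one-character fix above (if -> while) is the usual wait-loop idiom: re-check the condition after every wakeup, since another writeback can begin between the wakeup and the return to the caller. A minimal userspace sketch of the same idiom, using an invented busy flag and pthreads rather than page bits:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool busy;          /* invented stand-in for PG_writeback */

void wait_until_idle(void)
{
    pthread_mutex_lock(&lock);
    while (busy)           /* loop, not `if`: recheck after every wakeup */
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    wait_until_idle();     /* busy is false here, so this returns at once */
    return 0;
}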
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7a2c89b21115..783913e41f65 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -423,6 +423,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
return false;
+ if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
+ return true;
/*
* We start only with one section of pages, more pages are added as
* needed until the rest of deferred pages are initialized.
@@ -1205,8 +1207,10 @@ static void kernel_init_free_pages(struct page *page, int numpages)
/* s390's use of memset() could override KASAN redzones. */
kasan_disable_current();
for (i = 0; i < numpages; i++) {
+ u8 tag = page_kasan_tag(page + i);
page_kasan_tag_reset(page + i);
clear_highpage(page + i);
+ page_kasan_tag_set(page + i, tag);
}
kasan_enable_current();
}
@@ -2860,20 +2864,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
{
struct page *page;
-#ifdef CONFIG_CMA
- /*
- * Balance movable allocations between regular and CMA areas by
- * allocating from CMA when over half of the zone's free memory
- * is in the CMA area.
- */
- if (alloc_flags & ALLOC_CMA &&
- zone_page_state(zone, NR_FREE_CMA_PAGES) >
- zone_page_state(zone, NR_FREE_PAGES) / 2) {
- page = __rmqueue_cma_fallback(zone, order);
- if (page)
- return page;
+ if (IS_ENABLED(CONFIG_CMA)) {
+ /*
+ * Balance movable allocations between regular and CMA areas by
+ * allocating from CMA when over half of the zone's free memory
+ * is in the CMA area.
+ */
+ if (alloc_flags & ALLOC_CMA &&
+ zone_page_state(zone, NR_FREE_CMA_PAGES) >
+ zone_page_state(zone, NR_FREE_PAGES) / 2) {
+ page = __rmqueue_cma_fallback(zone, order);
+ if (page)
+ goto out;
+ }
}
-#endif
retry:
page = __rmqueue_smallest(zone, order, migratetype);
if (unlikely(!page)) {
@@ -2884,8 +2888,9 @@ retry:
alloc_flags))
goto retry;
}
-
- trace_mm_page_alloc_zone_locked(page, order, migratetype);
+out:
+ if (page)
+ trace_mm_page_alloc_zone_locked(page, order, migratetype);
return page;
}
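
The __rmqueue() rework above keeps the heuristic described in its comment: once more than half of a zone's free memory sits in the CMA area, movable allocations are pulled from CMA first so both pools drain at a similar rate. A toy sketch of that threshold test, with made-up counters standing in for NR_FREE_CMA_PAGES and NR_FREE_PAGES:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: free_cma and free_total stand in for the zone's
 * NR_FREE_CMA_PAGES and NR_FREE_PAGES counters. */
static bool prefer_cma(unsigned long free_cma, unsigned long free_total)
{
    /* Try CMA first once it holds over half of the zone's free memory. */
    return free_cma > free_total / 2;
}

int main(void)
{
    printf("%d\n", prefer_cma(600, 1000));  /* 1: CMA fallback tried first */
    printf("%d\n", prefer_cma(300, 1000));  /* 0: regular freelists first  */
    return 0;
}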
@@ -6116,7 +6121,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
* zone stats (e.g., nr_isolate_pageblock) are touched.
*/
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
- unsigned long start_pfn,
+ unsigned long start_pfn, unsigned long zone_end_pfn,
enum meminit_context context,
struct vmem_altmap *altmap, int migratetype)
{
@@ -6152,7 +6157,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
if (context == MEMINIT_EARLY) {
if (overlap_memmap_init(zone, &pfn))
continue;
- if (defer_init(nid, pfn, end_pfn))
+ if (defer_init(nid, pfn, zone_end_pfn))
break;
}
@@ -6266,7 +6271,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
if (end_pfn > start_pfn) {
size = end_pfn - start_pfn;
- memmap_init_zone(size, nid, zone, start_pfn,
+ memmap_init_zone(size, nid, zone, start_pfn, range_end_pfn,
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
}
}
@@ -7075,23 +7080,26 @@ void __init free_area_init_memoryless_node(int nid)
* Initialize all valid struct pages in the range [spfn, epfn) and mark them
* PageReserved(). Return the number of struct pages that were initialized.
*/
-static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
+static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn,
+ int zone, int nid)
{
- unsigned long pfn;
+ unsigned long pfn, zone_spfn, zone_epfn;
u64 pgcnt = 0;
+ zone_spfn = arch_zone_lowest_possible_pfn[zone];
+ zone_epfn = arch_zone_highest_possible_pfn[zone];
+
+ spfn = clamp(spfn, zone_spfn, zone_epfn);
+ epfn = clamp(epfn, zone_spfn, zone_epfn);
+
for (pfn = spfn; pfn < epfn; pfn++) {
if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
+ pageblock_nr_pages - 1;
continue;
}
- /*
- * Use a fake node/zone (0) for now. Some of these pages
- * (in memblock.reserved but not in memblock.memory) will
- * get re-initialized via reserve_bootmem_region() later.
- */
- __init_single_page(pfn_to_page(pfn), pfn, 0, 0);
+
+ __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
__SetPageReserved(pfn_to_page(pfn));
pgcnt++;
}
@@ -7100,51 +7108,64 @@ static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
}
/*
- * Only struct pages that are backed by physical memory are zeroed and
- * initialized by going through __init_single_page(). But, there are some
- * struct pages which are reserved in memblock allocator and their fields
- * may be accessed (for example page_to_pfn() on some configuration accesses
- * flags). We must explicitly initialize those struct pages.
+ * Only struct pages that correspond to ranges defined by memblock.memory
+ * are zeroed and initialized by going through __init_single_page() during
+ * memmap_init().
+ *
+ * But, there could be struct pages that correspond to holes in
+ * memblock.memory. This can happen because of the following reasons:
+ * - physical memory bank size is not necessarily an exact multiple of the
+ * arbitrary section size
+ * - early reserved memory may not be listed in memblock.memory
+ * - memory layouts defined with memmap= kernel parameter may not align
+ * nicely with memmap sections
*
- * This function also addresses a similar issue where struct pages are left
- * uninitialized because the physical address range is not covered by
- * memblock.memory or memblock.reserved. That could happen when memblock
- * layout is manually configured via memmap=, or when the highest physical
- * address (max_pfn) does not end on a section boundary.
+ * Explicitly initialize those struct pages so that:
+ * - PG_reserved is set
+ * - zone link is set according to the architecture constraints
+ * - node is set to the node id of the next populated region except for the
+ *   trailing hole where the last node id is used
*/
-static void __init init_unavailable_mem(void)
+static void __init init_zone_unavailable_mem(int zone)
{
- phys_addr_t start, end;
- u64 i, pgcnt;
- phys_addr_t next = 0;
+ unsigned long start, end;
+ int i, nid;
+ u64 pgcnt;
+ unsigned long next = 0;
/*
- * Loop through unavailable ranges not covered by memblock.memory.
+ * Loop through holes in memblock.memory and initialize struct
+ * pages corresponding to these holes
*/
pgcnt = 0;
- for_each_mem_range(i, &start, &end) {
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
if (next < start)
- pgcnt += init_unavailable_range(PFN_DOWN(next),
- PFN_UP(start));
+ pgcnt += init_unavailable_range(next, start, zone, nid);
next = end;
}
/*
- * Early sections always have a fully populated memmap for the whole
- * section - see pfn_valid(). If the last section has holes at the
- * end and that section is marked "online", the memmap will be
- * considered initialized. Make sure that memmap has a well defined
- * state.
+	 * The last section may surpass the actual end of memory (e.g. we can
+	 * have a 1GB section with only 512MB of RAM populated).
+ * Make sure that memmap has a well defined state in this case.
*/
- pgcnt += init_unavailable_range(PFN_DOWN(next),
- round_up(max_pfn, PAGES_PER_SECTION));
+ end = round_up(max_pfn, PAGES_PER_SECTION);
+ pgcnt += init_unavailable_range(next, end, zone, nid);
/*
* Struct pages that do not have backing memory. This could be because
* firmware is using some of this memory, or for some other reasons.
*/
if (pgcnt)
- pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
+ pr_info("Zone %s: zeroed struct page in unavailable ranges: %lld pages", zone_names[zone], pgcnt);
+}
+
+static void __init init_unavailable_mem(void)
+{
+ int zone;
+
+ for (zone = 0; zone < ZONE_MOVABLE; zone++)
+ init_zone_unavailable_mem(zone);
}
#else
static inline void __init init_unavailable_mem(void)
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 4bcc11958089..f5fee9cf90f8 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
+#include <linux/compat.h>
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
diff --git a/mm/slub.c b/mm/slub.c
index 0c8b43a5b3b0..69742ab9a21d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1619,9 +1619,6 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
else
page = __alloc_pages_node(node, flags, order);
- if (page)
- account_slab_page(page, order, s);
-
return page;
}
@@ -1774,6 +1771,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
page->objects = oo_objects(oo);
+ account_slab_page(page, oo_order(oo), s);
+
page->slab_cache = s;
__SetPageSlab(page);
if (page_is_pfmemalloc(page))
@@ -1974,7 +1973,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
t = acquire_slab(s, n, page, object == NULL, &objects);
if (!t)
- break;
+ continue; /* cmpxchg raced */
available += objects;
if (!object) {
@@ -2792,7 +2791,8 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
void *obj)
{
if (unlikely(slab_want_init_on_free(s)) && obj)
- memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
+ memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
+ 0, sizeof(void *));
}
/*
@@ -2884,7 +2884,7 @@ redo:
stat(s, ALLOC_FASTPATH);
}
- maybe_wipe_obj_freeptr(s, kasan_reset_tag(object));
+ maybe_wipe_obj_freeptr(s, object);
if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
memset(kasan_reset_tag(object), 0, s->object_size);
@@ -3330,7 +3330,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
int j;
for (j = 0; j < i; j++)
- memset(p[j], 0, s->object_size);
+ memset(kasan_reset_tag(p[j]), 0, s->object_size);
}
/* memcg and kmem_cache debug support */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4d88fe5a277a..e6f352bf0498 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2420,8 +2420,10 @@ void *vmap(struct page **pages, unsigned int count,
return NULL;
}
- if (flags & VM_MAP_PUT_PAGES)
+ if (flags & VM_MAP_PUT_PAGES) {
area->pages = pages;
+ area->nr_pages = count;
+ }
return area->addr;
}
EXPORT_SYMBOL(vmap);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 257cba79a96d..b1b574ad199d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1238,6 +1238,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
if (!PageSwapCache(page)) {
if (!(sc->gfp_mask & __GFP_IO))
goto keep_locked;
+ if (page_maybe_dma_pinned(page))
+ goto keep_locked;
if (PageTransHuge(page)) {
/* cannot split THP, skip it */
if (!can_split_huge_page(page, NULL))
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index f292e0267bb9..8b644113715e 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -284,8 +284,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
return 0;
out_free_newdev:
- if (new_dev->reg_state == NETREG_UNINITIALIZED)
- free_netdev(new_dev);
+ free_netdev(new_dev);
return err;
}
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index c1c30a9f76f3..8b796c499cbb 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -272,7 +272,8 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
kattr->test.repeat)
return -EINVAL;
- if (ctx_size_in < prog->aux->max_ctx_offset)
+ if (ctx_size_in < prog->aux->max_ctx_offset ||
+ ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
return -EINVAL;
if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 7839c3b9e5be..3ef7f78e553b 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -1155,6 +1155,7 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
if (peer)
return -EOPNOTSUPP;
+ memset(addr, 0, sizeof(*addr));
addr->can_family = AF_CAN;
addr->can_ifindex = so->ifindex;
addr->can_addr.tp.rx_id = so->rxid;
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 9815cfe42af0..ca44c327bace 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -569,6 +569,34 @@ e_range:
return -ERANGE;
}
+static int decode_con_secret(void **p, void *end, u8 *con_secret,
+ int *con_secret_len)
+{
+ int len;
+
+ ceph_decode_32_safe(p, end, len, bad);
+ ceph_decode_need(p, end, len, bad);
+
+ dout("%s len %d\n", __func__, len);
+ if (con_secret) {
+ if (len > CEPH_MAX_CON_SECRET_LEN) {
+ pr_err("connection secret too big %d\n", len);
+ goto bad_memzero;
+ }
+ memcpy(con_secret, *p, len);
+ *con_secret_len = len;
+ }
+ memzero_explicit(*p, len);
+ *p += len;
+ return 0;
+
+bad_memzero:
+ memzero_explicit(*p, len);
+bad:
+ pr_err("failed to decode connection secret\n");
+ return -EINVAL;
+}
+
static int handle_auth_session_key(struct ceph_auth_client *ac,
void **p, void *end,
u8 *session_key, int *session_key_len,
@@ -612,17 +640,9 @@ static int handle_auth_session_key(struct ceph_auth_client *ac,
dout("%s decrypted %d bytes\n", __func__, ret);
dend = dp + ret;
- ceph_decode_32_safe(&dp, dend, len, e_inval);
- if (len > CEPH_MAX_CON_SECRET_LEN) {
- pr_err("connection secret too big %d\n", len);
- return -EINVAL;
- }
-
- dout("%s connection secret len %d\n", __func__, len);
- if (con_secret) {
- memcpy(con_secret, dp, len);
- *con_secret_len = len;
- }
+ ret = decode_con_secret(&dp, dend, con_secret, con_secret_len);
+ if (ret)
+ return ret;
}
/* service tickets */
@@ -828,7 +848,6 @@ static int decrypt_authorizer_reply(struct ceph_crypto_key *secret,
{
void *dp, *dend;
u8 struct_v;
- int len;
int ret;
dp = *p + ceph_x_encrypt_offset();
@@ -843,17 +862,9 @@ static int decrypt_authorizer_reply(struct ceph_crypto_key *secret,
ceph_decode_64_safe(&dp, dend, *nonce_plus_one, e_inval);
dout("%s nonce_plus_one %llu\n", __func__, *nonce_plus_one);
if (struct_v >= 2) {
- ceph_decode_32_safe(&dp, dend, len, e_inval);
- if (len > CEPH_MAX_CON_SECRET_LEN) {
- pr_err("connection secret too big %d\n", len);
- return -EINVAL;
- }
-
- dout("%s connection secret len %d\n", __func__, len);
- if (con_secret) {
- memcpy(con_secret, dp, len);
- *con_secret_len = len;
- }
+ ret = decode_con_secret(&dp, dend, con_secret, con_secret_len);
+ if (ret)
+ return ret;
}
return 0;
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 4f75df40fb12..92d89b331645 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -96,6 +96,7 @@ int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
key->len = ceph_decode_16(p);
ceph_decode_need(p, end, key->len, bad);
ret = set_secret(key, *p);
+ memzero_explicit(*p, key->len);
*p += key->len;
return ret;
@@ -134,7 +135,7 @@ int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
{
if (key) {
- kfree(key->key);
+ kfree_sensitive(key->key);
key->key = NULL;
if (key->tfm) {
crypto_free_sync_skcipher(key->tfm);
diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c
index 04f653b3c897..2cb5ffdf071a 100644
--- a/net/ceph/messenger_v1.c
+++ b/net/ceph/messenger_v1.c
@@ -1100,7 +1100,7 @@ static int read_partial_message(struct ceph_connection *con)
if (ret < 0)
return ret;
- BUG_ON(!con->in_msg ^ skip);
+ BUG_ON((!con->in_msg) ^ skip);
if (skip) {
/* skip this message */
dout("alloc_msg said skip message\n");
diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
index c1ebb2aa08b5..cc40ce4e02fb 100644
--- a/net/ceph/messenger_v2.c
+++ b/net/ceph/messenger_v2.c
@@ -689,11 +689,10 @@ static int verify_epilogue_crcs(struct ceph_connection *con, u32 front_crc,
}
static int setup_crypto(struct ceph_connection *con,
- u8 *session_key, int session_key_len,
- u8 *con_secret, int con_secret_len)
+ const u8 *session_key, int session_key_len,
+ const u8 *con_secret, int con_secret_len)
{
unsigned int noio_flag;
- void *p;
int ret;
dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
@@ -751,15 +750,14 @@ static int setup_crypto(struct ceph_connection *con,
return ret;
}
- p = con_secret;
- WARN_ON((unsigned long)p & crypto_aead_alignmask(con->v2.gcm_tfm));
- ret = crypto_aead_setkey(con->v2.gcm_tfm, p, CEPH_GCM_KEY_LEN);
+ WARN_ON((unsigned long)con_secret &
+ crypto_aead_alignmask(con->v2.gcm_tfm));
+ ret = crypto_aead_setkey(con->v2.gcm_tfm, con_secret, CEPH_GCM_KEY_LEN);
if (ret) {
pr_err("failed to set gcm key: %d\n", ret);
return ret;
}
- p += CEPH_GCM_KEY_LEN;
WARN_ON(crypto_aead_ivsize(con->v2.gcm_tfm) != CEPH_GCM_IV_LEN);
ret = crypto_aead_setauthsize(con->v2.gcm_tfm, CEPH_GCM_TAG_LEN);
if (ret) {
@@ -777,8 +775,11 @@ static int setup_crypto(struct ceph_connection *con,
aead_request_set_callback(con->v2.gcm_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &con->v2.gcm_wait);
- memcpy(&con->v2.in_gcm_nonce, p, CEPH_GCM_IV_LEN);
- memcpy(&con->v2.out_gcm_nonce, p + CEPH_GCM_IV_LEN, CEPH_GCM_IV_LEN);
+ memcpy(&con->v2.in_gcm_nonce, con_secret + CEPH_GCM_KEY_LEN,
+ CEPH_GCM_IV_LEN);
+ memcpy(&con->v2.out_gcm_nonce,
+ con_secret + CEPH_GCM_KEY_LEN + CEPH_GCM_IV_LEN,
+ CEPH_GCM_IV_LEN);
return 0; /* auth_x, secure mode */
}
@@ -800,7 +801,7 @@ static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
desc->tfm = con->v2.hmac_tfm;
ret = crypto_shash_init(desc);
if (ret)
- return ret;
+ goto out;
for (i = 0; i < kvec_cnt; i++) {
WARN_ON((unsigned long)kvecs[i].iov_base &
@@ -808,15 +809,14 @@ static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
ret = crypto_shash_update(desc, kvecs[i].iov_base,
kvecs[i].iov_len);
if (ret)
- return ret;
+ goto out;
}
ret = crypto_shash_final(desc, hmac);
- if (ret)
- return ret;
+out:
shash_desc_zero(desc);
- return 0; /* auth_x, both plain and secure modes */
+ return ret; /* auth_x, both plain and secure modes */
}
static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
@@ -1333,7 +1333,8 @@ static int prepare_auth_signature(struct ceph_connection *con)
void *buf;
int ret;
- buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE, false));
+ buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE,
+ con_secure(con)));
if (!buf)
return -ENOMEM;
@@ -2032,10 +2033,18 @@ bad:
return -EINVAL;
}
+/*
+ * Align session_key and con_secret to avoid GFP_ATOMIC allocation
+ * inside crypto_shash_setkey() and crypto_aead_setkey() called from
+ * setup_crypto(). __aligned(16) isn't guaranteed to work for stack
+ * objects, so do it by hand.
+ */
static int process_auth_done(struct ceph_connection *con, void *p, void *end)
{
- u8 session_key[CEPH_KEY_LEN];
- u8 con_secret[CEPH_MAX_CON_SECRET_LEN];
+ u8 session_key_buf[CEPH_KEY_LEN + 16];
+ u8 con_secret_buf[CEPH_MAX_CON_SECRET_LEN + 16];
+ u8 *session_key = PTR_ALIGN(&session_key_buf[0], 16);
+ u8 *con_secret = PTR_ALIGN(&con_secret_buf[0], 16);
int session_key_len, con_secret_len;
int payload_len;
u64 global_id;
@@ -2063,27 +2072,32 @@ static int process_auth_done(struct ceph_connection *con, void *p, void *end)
if (con->state != CEPH_CON_S_V2_AUTH) {
dout("%s con %p state changed to %d\n", __func__, con,
con->state);
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto out;
}
dout("%s con %p handle_auth_done ret %d\n", __func__, con, ret);
if (ret)
- return ret;
+ goto out;
ret = setup_crypto(con, session_key, session_key_len, con_secret,
con_secret_len);
if (ret)
- return ret;
+ goto out;
reset_out_kvecs(con);
ret = prepare_auth_signature(con);
if (ret) {
pr_err("prepare_auth_signature failed: %d\n", ret);
- return ret;
+ goto out;
}
con->state = CEPH_CON_S_V2_AUTH_SIGNATURE;
- return 0;
+
+out:
+ memzero_explicit(session_key_buf, sizeof(session_key_buf));
+ memzero_explicit(con_secret_buf, sizeof(con_secret_buf));
+ return ret;
bad:
pr_err("failed to decode auth_done\n");
@@ -3427,6 +3441,8 @@ void ceph_con_v2_reset_protocol(struct ceph_connection *con)
}
con->v2.con_mode = CEPH_CON_MODE_UNKNOWN;
+ memzero_explicit(&con->v2.in_gcm_nonce, CEPH_GCM_IV_LEN);
+ memzero_explicit(&con->v2.out_gcm_nonce, CEPH_GCM_IV_LEN);
if (con->v2.hmac_tfm) {
crypto_free_shash(con->v2.hmac_tfm);
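
The process_auth_done() change above aligns two stack buffers by hand because __aligned(16) is not guaranteed to work for on-stack objects. A small userspace sketch of the same over-allocate-and-round-up trick; align_up() here is a hypothetical stand-in for the kernel's PTR_ALIGN():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring PTR_ALIGN(): round a pointer up to the
 * next multiple of `align` (align must be a power of two). */
static void *align_up(void *p, uintptr_t align)
{
    return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main(void)
{
    unsigned char buf[32 + 16];               /* over-allocate by the alignment */
    unsigned char *key = align_up(buf, 16);   /* 16-byte aligned, inside buf   */

    printf("aligned %p from %p, offset %zu\n",
           (void *)key, (void *)buf, (size_t)(key - buf));
    return 0;
}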
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index b9d54ed9f338..195ceb8afb06 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -1433,7 +1433,7 @@ static int mon_handle_auth_bad_method(struct ceph_connection *con,
/*
* handle incoming message
*/
-static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+static void mon_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
struct ceph_mon_client *monc = con->private;
int type = le16_to_cpu(msg->hdr.type);
@@ -1565,21 +1565,21 @@ static void mon_fault(struct ceph_connection *con)
* will come from the messenger workqueue, which is drained prior to
* mon_client destruction.
*/
-static struct ceph_connection *con_get(struct ceph_connection *con)
+static struct ceph_connection *mon_get_con(struct ceph_connection *con)
{
return con;
}
-static void con_put(struct ceph_connection *con)
+static void mon_put_con(struct ceph_connection *con)
{
}
static const struct ceph_connection_operations mon_con_ops = {
- .get = con_get,
- .put = con_put,
- .dispatch = dispatch,
- .fault = mon_fault,
+ .get = mon_get_con,
+ .put = mon_put_con,
.alloc_msg = mon_alloc_msg,
+ .dispatch = mon_dispatch,
+ .fault = mon_fault,
.get_auth_request = mon_get_auth_request,
.handle_auth_reply_more = mon_handle_auth_reply_more,
.handle_auth_done = mon_handle_auth_done,
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 61229c5e22cb..ff8624a7c964 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -5412,7 +5412,7 @@ void ceph_osdc_cleanup(void)
/*
* handle incoming message
*/
-static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+static void osd_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
struct ceph_osd *osd = con->private;
struct ceph_osd_client *osdc = osd->o_osdc;
@@ -5534,9 +5534,9 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
return m;
}
-static struct ceph_msg *alloc_msg(struct ceph_connection *con,
- struct ceph_msg_header *hdr,
- int *skip)
+static struct ceph_msg *osd_alloc_msg(struct ceph_connection *con,
+ struct ceph_msg_header *hdr,
+ int *skip)
{
struct ceph_osd *osd = con->private;
int type = le16_to_cpu(hdr->type);
@@ -5560,7 +5560,7 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con,
/*
* Wrappers to refcount containing ceph_osd struct
*/
-static struct ceph_connection *get_osd_con(struct ceph_connection *con)
+static struct ceph_connection *osd_get_con(struct ceph_connection *con)
{
struct ceph_osd *osd = con->private;
if (get_osd(osd))
@@ -5568,7 +5568,7 @@ static struct ceph_connection *get_osd_con(struct ceph_connection *con)
return NULL;
}
-static void put_osd_con(struct ceph_connection *con)
+static void osd_put_con(struct ceph_connection *con)
{
struct ceph_osd *osd = con->private;
put_osd(osd);
@@ -5582,8 +5582,8 @@ static void put_osd_con(struct ceph_connection *con)
* Note: returned pointer is the address of a structure that's
* managed separately. Caller must *not* attempt to free it.
*/
-static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
- int *proto, int force_new)
+static struct ceph_auth_handshake *
+osd_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
@@ -5599,7 +5599,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
return auth;
}
-static int add_authorizer_challenge(struct ceph_connection *con,
+static int osd_add_authorizer_challenge(struct ceph_connection *con,
void *challenge_buf, int challenge_buf_len)
{
struct ceph_osd *o = con->private;
@@ -5610,7 +5610,7 @@ static int add_authorizer_challenge(struct ceph_connection *con,
challenge_buf, challenge_buf_len);
}
-static int verify_authorizer_reply(struct ceph_connection *con)
+static int osd_verify_authorizer_reply(struct ceph_connection *con)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
@@ -5622,7 +5622,7 @@ static int verify_authorizer_reply(struct ceph_connection *con)
NULL, NULL, NULL, NULL);
}
-static int invalidate_authorizer(struct ceph_connection *con)
+static int osd_invalidate_authorizer(struct ceph_connection *con)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
@@ -5731,18 +5731,18 @@ static int osd_check_message_signature(struct ceph_msg *msg)
}
static const struct ceph_connection_operations osd_con_ops = {
- .get = get_osd_con,
- .put = put_osd_con,
- .dispatch = dispatch,
- .get_authorizer = get_authorizer,
- .add_authorizer_challenge = add_authorizer_challenge,
- .verify_authorizer_reply = verify_authorizer_reply,
- .invalidate_authorizer = invalidate_authorizer,
- .alloc_msg = alloc_msg,
+ .get = osd_get_con,
+ .put = osd_put_con,
+ .alloc_msg = osd_alloc_msg,
+ .dispatch = osd_dispatch,
+ .fault = osd_fault,
.reencode_message = osd_reencode_message,
+ .get_authorizer = osd_get_authorizer,
+ .add_authorizer_challenge = osd_add_authorizer_challenge,
+ .verify_authorizer_reply = osd_verify_authorizer_reply,
+ .invalidate_authorizer = osd_invalidate_authorizer,
.sign_message = osd_sign_message,
.check_message_signature = osd_check_message_signature,
- .fault = osd_fault,
.get_auth_request = osd_get_auth_request,
.handle_auth_reply_more = osd_handle_auth_reply_more,
.handle_auth_done = osd_handle_auth_done,
diff --git a/net/core/dev.c b/net/core/dev.c
index 8fa739259041..a979b86dbacd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -9661,9 +9661,20 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
}
}
- if ((features & NETIF_F_HW_TLS_TX) && !(features & NETIF_F_HW_CSUM)) {
- netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
- features &= ~NETIF_F_HW_TLS_TX;
+ if (features & NETIF_F_HW_TLS_TX) {
+ bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ bool hw_csum = features & NETIF_F_HW_CSUM;
+
+ if (!ip_csum && !hw_csum) {
+ netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
+ features &= ~NETIF_F_HW_TLS_TX;
+ }
+ }
+
+ if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
+ netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
+ features &= ~NETIF_F_HW_TLS_RX;
}
return features;
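
The netdev_fix_features() hunk above keeps TLS TX offload when the device offers either full hardware checksumming or both protocol-specific checksum features; the "both bits set" test uses the usual mask-and-compare idiom. A hedged sketch with invented feature bits:

#include <stdbool.h>
#include <stdio.h>

/* Invented feature bits, for illustration only. */
#define F_IP_CSUM   0x1u
#define F_IPV6_CSUM 0x2u
#define F_HW_CSUM   0x4u

static bool tls_tx_allowed(unsigned int features)
{
    unsigned int both = F_IP_CSUM | F_IPV6_CSUM;

    /* (features & both) == both is true only if every bit in `both` is set. */
    return (features & both) == both || (features & F_HW_CSUM);
}

int main(void)
{
    printf("%d\n", tls_tx_allowed(F_IP_CSUM | F_IPV6_CSUM)); /* 1 */
    printf("%d\n", tls_tx_allowed(F_IP_CSUM));               /* 0 */
    printf("%d\n", tls_tx_allowed(F_HW_CSUM));               /* 1 */
    return 0;
}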
@@ -10077,17 +10088,11 @@ int register_netdevice(struct net_device *dev)
ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
ret = notifier_to_errno(ret);
if (ret) {
+ /* Expect explicit free_netdev() on failure */
+ dev->needs_free_netdev = false;
rollback_registered(dev);
- rcu_barrier();
-
- dev->reg_state = NETREG_UNREGISTERED;
- /* We should put the kobject that hold in
- * netdev_unregister_kobject(), otherwise
- * the net device cannot be freed when
- * driver calls free_netdev(), because the
- * kobject is being hold.
- */
- kobject_put(&dev->dev.kobj);
+ net_set_todo(dev);
+ goto out;
}
/*
* Prevent userspace races by waiting until the network
@@ -10631,6 +10636,17 @@ void free_netdev(struct net_device *dev)
struct napi_struct *p, *n;
might_sleep();
+
+	/* When called immediately after register_netdevice() failed, the unwind
+ * handling may still be dismantling the device. Handle that case by
+ * deferring the free.
+ */
+ if (dev->reg_state == NETREG_UNREGISTERING) {
+ ASSERT_RTNL();
+ dev->needs_free_netdev = true;
+ return;
+ }
+
netif_free_tx_queues(dev);
netif_free_rx_queues(dev);
diff --git a/net/core/devlink.c b/net/core/devlink.c
index ee828e4b1007..738d4344d679 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -4146,7 +4146,7 @@ out:
static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
struct genl_info *info)
{
- struct devlink_port *devlink_port = info->user_ptr[0];
+ struct devlink_port *devlink_port = info->user_ptr[1];
struct devlink_param_item *param_item;
struct sk_buff *msg;
int err;
@@ -4175,7 +4175,7 @@ static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
struct genl_info *info)
{
- struct devlink_port *devlink_port = info->user_ptr[0];
+ struct devlink_port *devlink_port = info->user_ptr[1];
return __devlink_nl_cmd_param_set_doit(devlink_port->devlink,
devlink_port->index,
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 80dbf2f4016e..8e582e29a41e 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -80,11 +80,11 @@ static void est_timer(struct timer_list *t)
u64 rate, brate;
est_fetch_counters(est, &b);
- brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
- brate -= (est->avbps >> est->ewma_log);
+ brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
+ brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);
- rate = (b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
- rate -= (est->avpps >> est->ewma_log);
+ rate = (b.packets - est->last_packets) << (10 - est->intvl_log);
+ rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
write_seqcount_begin(&est->seq);
est->avbps += brate;
@@ -143,6 +143,9 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
if (parm->interval < -2 || parm->interval > 3)
return -EINVAL;
+ if (parm->ewma_log == 0 || parm->ewma_log >= 31)
+ return -EINVAL;
+
est = kzalloc(sizeof(*est), GFP_KERNEL);
if (!est)
return -ENOBUFS;
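
The est_timer() rework above keeps the estimator an exponentially weighted moving average: each interval the average moves toward the newly measured rate by 1/2^ewma_log of the difference, and the new bounds check in gen_new_estimator() keeps that shift meaningful. A toy sketch of the update rule with invented sample values:

#include <stdio.h>

int main(void)
{
    unsigned int ewma_log = 3;        /* weight = 1/8 per interval */
    long long avg = 0;                /* running average, bytes per interval */
    long long samples[] = { 8000, 8000, 12000, 4000, 8000 };

    for (int i = 0; i < 5; i++) {
        /* avg += (sample - avg) / 2^ewma_log, done with shifts as in est_timer() */
        avg += (samples[i] >> ewma_log) - (avg >> ewma_log);
        printf("sample %lld -> avg %lld\n", samples[i], avg);
    }
    return 0;
}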
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9500d28a43b0..277ed854aef1 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1569,10 +1569,8 @@ static void neigh_proxy_process(struct timer_list *t)
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
struct sk_buff *skb)
{
- unsigned long now = jiffies;
-
- unsigned long sched_next = now + (prandom_u32() %
- NEIGH_VAR(p, PROXY_DELAY));
+ unsigned long sched_next = jiffies +
+ prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));
if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
kfree_skb(skb);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 999b70c59761..daf502c13d6d 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1317,8 +1317,8 @@ static const struct attribute_group dql_group = {
static ssize_t xps_cpus_show(struct netdev_queue *queue,
char *buf)
{
+ int cpu, len, ret, num_tc = 1, tc = 0;
struct net_device *dev = queue->dev;
- int cpu, len, num_tc = 1, tc = 0;
struct xps_dev_maps *dev_maps;
cpumask_var_t mask;
unsigned long index;
@@ -1328,22 +1328,31 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
index = get_netdev_queue_index(queue);
+ if (!rtnl_trylock())
+ return restart_syscall();
+
if (dev->num_tc) {
/* Do not allow XPS on subordinate device directly */
num_tc = dev->num_tc;
- if (num_tc < 0)
- return -EINVAL;
+ if (num_tc < 0) {
+ ret = -EINVAL;
+ goto err_rtnl_unlock;
+ }
/* If queue belongs to subordinate dev use its map */
dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
tc = netdev_txq_to_tc(dev, index);
- if (tc < 0)
- return -EINVAL;
+ if (tc < 0) {
+ ret = -EINVAL;
+ goto err_rtnl_unlock;
+ }
}
- if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
- return -ENOMEM;
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto err_rtnl_unlock;
+ }
rcu_read_lock();
dev_maps = rcu_dereference(dev->xps_cpus_map);
@@ -1366,9 +1375,15 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
}
rcu_read_unlock();
+ rtnl_unlock();
+
len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
free_cpumask_var(mask);
return len < PAGE_SIZE ? len : -EINVAL;
+
+err_rtnl_unlock:
+ rtnl_unlock();
+ return ret;
}
static ssize_t xps_cpus_store(struct netdev_queue *queue,
@@ -1396,7 +1411,13 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue,
return err;
}
+ if (!rtnl_trylock()) {
+ free_cpumask_var(mask);
+ return restart_syscall();
+ }
+
err = netif_set_xps_queue(dev, mask, index);
+ rtnl_unlock();
free_cpumask_var(mask);
@@ -1408,22 +1429,29 @@ static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
{
+ int j, len, ret, num_tc = 1, tc = 0;
struct net_device *dev = queue->dev;
struct xps_dev_maps *dev_maps;
unsigned long *mask, index;
- int j, len, num_tc = 1, tc = 0;
index = get_netdev_queue_index(queue);
+ if (!rtnl_trylock())
+ return restart_syscall();
+
if (dev->num_tc) {
num_tc = dev->num_tc;
tc = netdev_txq_to_tc(dev, index);
- if (tc < 0)
- return -EINVAL;
+ if (tc < 0) {
+ ret = -EINVAL;
+ goto err_rtnl_unlock;
+ }
}
mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
- if (!mask)
- return -ENOMEM;
+ if (!mask) {
+ ret = -ENOMEM;
+ goto err_rtnl_unlock;
+ }
rcu_read_lock();
dev_maps = rcu_dereference(dev->xps_rxqs_map);
@@ -1449,10 +1477,16 @@ static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
out_no_maps:
rcu_read_unlock();
+ rtnl_unlock();
+
len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
bitmap_free(mask);
return len < PAGE_SIZE ? len : -EINVAL;
+
+err_rtnl_unlock:
+ rtnl_unlock();
+ return ret;
}
static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
@@ -1478,10 +1512,17 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
return err;
}
+ if (!rtnl_trylock()) {
+ bitmap_free(mask);
+ return restart_syscall();
+ }
+
cpus_read_lock();
err = __netif_set_xps_queue(dev, mask, index, true);
cpus_read_unlock();
+ rtnl_unlock();
+
bitmap_free(mask);
return err ? : len;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index bb0596c41b3e..3d6ab194d0f5 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3439,26 +3439,15 @@ replay:
dev->ifindex = ifm->ifi_index;
- if (ops->newlink) {
+ if (ops->newlink)
err = ops->newlink(link_net ? : net, dev, tb, data, extack);
- /* Drivers should call free_netdev() in ->destructor
- * and unregister it on failure after registration
- * so that device could be finally freed in rtnl_unlock.
- */
- if (err < 0) {
- /* If device is not registered at all, free it now */
- if (dev->reg_state == NETREG_UNINITIALIZED ||
- dev->reg_state == NETREG_UNREGISTERED)
- free_netdev(dev);
- goto out;
- }
- } else {
+ else
err = register_netdevice(dev);
- if (err < 0) {
- free_netdev(dev);
- goto out;
- }
+ if (err < 0) {
+ free_netdev(dev);
+ goto out;
}
+
err = rtnl_configure_link(dev, ifm);
if (err < 0)
goto out_unregister;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f62cae3f75d8..785daff48030 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -437,7 +437,11 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
len += NET_SKB_PAD;
- if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+ /* If requested length is either too small or too big,
+ * we use kmalloc() for skb->head allocation.
+ */
+ if (len <= SKB_WITH_OVERHEAD(1024) ||
+ len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
(gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
if (!skb)
@@ -501,13 +505,17 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
gfp_t gfp_mask)
{
- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+ struct napi_alloc_cache *nc;
struct sk_buff *skb;
void *data;
len += NET_SKB_PAD + NET_IP_ALIGN;
- if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+ /* If requested length is either too small or too big,
+ * we use kmalloc() for skb->head allocation.
+ */
+ if (len <= SKB_WITH_OVERHEAD(1024) ||
+ len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
(gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
if (!skb)
@@ -515,6 +523,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
goto skb_success;
}
+ nc = this_cpu_ptr(&napi_alloc_cache);
len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
len = SKB_DATA_ALIGN(len);
@@ -3442,6 +3451,7 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
st->root_skb = st->cur_skb = skb;
st->frag_idx = st->stepped_offset = 0;
st->frag_data = NULL;
+ st->frag_off = 0;
}
EXPORT_SYMBOL(skb_prepare_seq_read);
@@ -3496,14 +3506,27 @@ next_skb:
st->stepped_offset += skb_headlen(st->cur_skb);
while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
+ unsigned int pg_idx, pg_off, pg_sz;
+
frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
- block_limit = skb_frag_size(frag) + st->stepped_offset;
+ pg_idx = 0;
+ pg_off = skb_frag_off(frag);
+ pg_sz = skb_frag_size(frag);
+
+ if (skb_frag_must_loop(skb_frag_page(frag))) {
+ pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT;
+ pg_off = offset_in_page(pg_off + st->frag_off);
+ pg_sz = min_t(unsigned int, pg_sz - st->frag_off,
+ PAGE_SIZE - pg_off);
+ }
+
+ block_limit = pg_sz + st->stepped_offset;
if (abs_offset < block_limit) {
if (!st->frag_data)
- st->frag_data = kmap_atomic(skb_frag_page(frag));
+ st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx);
- *data = (u8 *) st->frag_data + skb_frag_off(frag) +
+ *data = (u8 *)st->frag_data + pg_off +
(abs_offset - st->stepped_offset);
return block_limit - abs_offset;
@@ -3514,8 +3537,12 @@ next_skb:
st->frag_data = NULL;
}
- st->frag_idx++;
- st->stepped_offset += skb_frag_size(frag);
+ st->stepped_offset += pg_sz;
+ st->frag_off += pg_sz;
+ if (st->frag_off == skb_frag_size(frag)) {
+ st->frag_off = 0;
+ st->frag_idx++;
+ }
}
if (st->frag_data) {
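
The skb_seq_read() hunk above walks a fragment one page-bounded chunk at a time so that highmem compound pages can be mapped page by page. A standalone sketch of the same index/offset/length arithmetic over a hypothetical fragment:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Illustrative only: walk a fragment of `size` bytes that starts at byte
 * `start` inside a compound page, one page-bounded chunk at a time. */
int main(void)
{
    size_t start = 3000, size = 10000;   /* hypothetical fragment geometry */
    size_t done = 0;

    while (done < size) {
        size_t abs = start + done;
        size_t idx = abs >> PAGE_SHIFT;        /* which page to map        */
        size_t off = abs & (PAGE_SIZE - 1);    /* offset within that page  */
        size_t len = size - done;

        if (len > PAGE_SIZE - off)
            len = PAGE_SIZE - off;             /* never cross a page       */
        printf("map page %zu, copy %zu bytes at offset %zu\n", idx, len, off);
        done += len;
    }
    return 0;
}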
@@ -3655,7 +3682,8 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
unsigned int delta_truesize = 0;
unsigned int delta_len = 0;
struct sk_buff *tail = NULL;
- struct sk_buff *nskb;
+ struct sk_buff *nskb, *tmp;
+ int err;
skb_push(skb, -skb_network_offset(skb) + offset);
@@ -3665,11 +3693,28 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
nskb = list_skb;
list_skb = list_skb->next;
+ err = 0;
+ if (skb_shared(nskb)) {
+ tmp = skb_clone(nskb, GFP_ATOMIC);
+ if (tmp) {
+ consume_skb(nskb);
+ nskb = tmp;
+ err = skb_unclone(nskb, GFP_ATOMIC);
+ } else {
+ err = -ENOMEM;
+ }
+ }
+
if (!tail)
skb->next = nskb;
else
tail->next = nskb;
+ if (unlikely(err)) {
+ nskb->next = list_skb;
+ goto err_linearize;
+ }
+
tail = nskb;
delta_len += nskb->len;
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index bbdd3c7b6cb5..b065f0a103ed 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -293,7 +293,7 @@ select_by_hash:
i = j = reciprocal_scale(hash, socks);
while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
i++;
- if (i >= reuse->num_socks)
+ if (i >= socks)
i = 0;
if (i == j)
goto out;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 084e159a12ba..653e3bc9c87b 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1765,6 +1765,8 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
fn = &reply_funcs[dcb->cmd];
if (!fn->cb)
return -EOPNOTSUPP;
+ if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN))
+ return -EPERM;
if (!tb[DCB_ATTR_IFNAME])
return -EINVAL;
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 183003e45762..a47e0f9b20d0 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -353,9 +353,13 @@ static int dsa_port_devlink_setup(struct dsa_port *dp)
static void dsa_port_teardown(struct dsa_port *dp)
{
+ struct devlink_port *dlp = &dp->devlink_port;
+
if (!dp->setup)
return;
+ devlink_port_type_clear(dlp);
+
switch (dp->type) {
case DSA_PORT_TYPE_UNUSED:
break;
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 5a0f6fec4271..cb3a5cf99b25 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -309,8 +309,18 @@ static struct lock_class_key dsa_master_addr_list_lock_key;
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
int mtu = ETH_DATA_LEN + cpu_dp->tag_ops->overhead;
+ struct dsa_switch *ds = cpu_dp->ds;
+ struct device_link *consumer_link;
int ret;
+ /* The DSA master must use SET_NETDEV_DEV for this to work. */
+ consumer_link = device_link_add(ds->dev, dev->dev.parent,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!consumer_link)
+ netdev_err(dev,
+ "Failed to create a device link to DSA switch %s\n",
+ dev_name(ds->dev));
+
rtnl_lock();
ret = dev_set_mtu(dev, mtu);
rtnl_unlock();
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 8b07f3a4f2db..a3271ec3e162 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -443,7 +443,6 @@ static int esp_output_encap(struct xfrm_state *x, struct sk_buff *skb,
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
u8 *tail;
- u8 *vaddr;
int nfrags;
int esph_offset;
struct page *page;
@@ -485,14 +484,10 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
page = pfrag->page;
get_page(page);
- vaddr = kmap_atomic(page);
-
- tail = vaddr + pfrag->offset;
+ tail = page_address(page) + pfrag->offset;
esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
- kunmap_atomic(vaddr);
-
nfrags = skb_shinfo(skb)->nr_frags;
__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index cdf6ec5aa45d..84bb707bd88d 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -292,7 +292,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
.flowi4_iif = LOOPBACK_IFINDEX,
.flowi4_oif = l3mdev_master_ifindex_rcu(dev),
.daddr = ip_hdr(skb)->saddr,
- .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
+ .flowi4_tos = ip_hdr(skb)->tos & IPTOS_RT_MASK,
.flowi4_scope = scope,
.flowi4_mark = vmark ? skb->mark : 0,
};
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 66fdbfe5447c..5d1e6fe9d838 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -128,7 +128,7 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
* to 0 and sets the configured key in the
* inner erspan header field
*/
- if (greh->protocol == htons(ETH_P_ERSPAN) ||
+ if ((greh->protocol == htons(ETH_P_ERSPAN) && hdr_len != 4) ||
greh->protocol == htons(ETH_P_ERSPAN2)) {
struct erspan_base_hdr *ershdr;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index fd8b8800a2c3..6bd7ca09af03 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -851,6 +851,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
newicsk->icsk_retransmits = 0;
newicsk->icsk_backoff = 0;
newicsk->icsk_probes_out = 0;
+ newicsk->icsk_probes_tstamp = 0;
/* Deinitialize accept_queue to trap illegal accesses. */
memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 89fff5f59eea..2ed0b01f72f0 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -302,7 +302,7 @@ static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *
if (skb_is_gso(skb))
return ip_finish_output_gso(net, sk, skb, mtu);
- if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
+ if (skb->len > mtu || IPCB(skb)->frag_max_size)
return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
return ip_finish_output2(net, sk, skb);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index ee65c9225178..64594aa755f0 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -759,8 +759,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
- if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph,
- 0, 0, false)) {
+ df = tnl_params->frag_off;
+ if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
+ df |= (inner_iph->frag_off & htons(IP_DF));
+
+ if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, 0, 0, false)) {
ip_rt_put(rt);
goto tx_error;
}
@@ -788,10 +791,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
ttl = ip4_dst_hoplimit(&rt->dst);
}
- df = tnl_params->frag_off;
- if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
- df |= (inner_iph->frag_off&htons(IP_DF));
-
max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
if (max_headroom > dev->needed_headroom)
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 563b62b76a5f..c576a63d09db 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1379,7 +1379,7 @@ static int compat_get_entries(struct net *net,
xt_compat_lock(NFPROTO_ARP);
t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
if (!IS_ERR(t)) {
- const struct xt_table_info *private = t->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(t);
struct xt_table_info info;
ret = compat_table_info(private, &info);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 6e2851f8d3a3..e8f6f9d86237 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1589,7 +1589,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
xt_compat_lock(AF_INET);
t = xt_find_table_lock(net, AF_INET, get.name);
if (!IS_ERR(t)) {
- const struct xt_table_info *private = t->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(t);
struct xt_table_info info;
ret = compat_table_info(private, &info);
if (!ret && get.size == info.size)
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index cc23f1ce239c..8cd3224d913e 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -76,7 +76,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
flow.daddr = iph->saddr;
flow.saddr = rpfilter_get_saddr(iph->daddr);
flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
- flow.flowi4_tos = RT_TOS(iph->tos);
+ flow.flowi4_tos = iph->tos & IPTOS_RT_MASK;
flow.flowi4_scope = RT_SCOPE_UNIVERSE;
flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 5e1b22d4f939..e53e43aef785 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -627,7 +627,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
for (i = NHA_GROUP_TYPE + 1; i < __NHA_MAX; ++i) {
if (!tb[i])
continue;
- if (tb[NHA_FDB])
+ if (i == NHA_FDB)
continue;
NL_SET_ERR_MSG(extack,
"No other attributes can be set in nexthop groups");
@@ -1459,8 +1459,10 @@ static struct nexthop *nexthop_create_group(struct net *net,
return nh;
out_no_nh:
- for (; i >= 0; --i)
+ for (i--; i >= 0; --i) {
+ list_del(&nhg->nh_entries[i].nh_list);
nexthop_put(nhg->nh_entries[i].nh);
+ }
kfree(nhg->spare);
kfree(nhg);
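
Note on the nexthop error path above: the unwind loop now starts at i - 1 so the entry that failed to initialise is never released, and each nh_list entry is unlinked before the put. A small self-contained sketch of the same partial-cleanup pattern, using hypothetical init_entry()/put_entry() helpers rather than the kernel ones:

#include <stdbool.h>
#include <stdio.h>

#define N 4

static bool init_entry(int i) { return i < 2; }   /* pretend entry 2 fails */
static void put_entry(int i)  { printf("put %d\n", i); }

int main(void)
{
        int i;

        for (i = 0; i < N; i++) {
                if (!init_entry(i))
                        goto out_err;
        }
        return 0;

out_err:
        /* Entry i was never initialised, so start unwinding at i - 1. */
        for (i--; i >= 0; --i)
                put_entry(i);
        return 1;
}
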
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ed42d2193c5c..32545ecf2ab1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2937,6 +2937,7 @@ int tcp_disconnect(struct sock *sk, int flags)
icsk->icsk_backoff = 0;
icsk->icsk_probes_out = 0;
+ icsk->icsk_probes_tstamp = 0;
icsk->icsk_rto = TCP_TIMEOUT_INIT;
icsk->icsk_rto_min = TCP_RTO_MIN;
icsk->icsk_delack_max = TCP_DELACK_MAX;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c7e16b0ed791..a7dfca0a38cd 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3384,6 +3384,7 @@ static void tcp_ack_probe(struct sock *sk)
return;
if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) {
icsk->icsk_backoff = 0;
+ icsk->icsk_probes_tstamp = 0;
inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
/* Socket must be waked up by subsequent tcp_data_snd_check().
* This function is not for random using!
@@ -4396,10 +4397,9 @@ static void tcp_rcv_spurious_retrans(struct sock *sk, const struct sk_buff *skb)
* The receiver remembers and reflects via DSACKs. Leverage the
* DSACK state and change the txhash to re-route speculatively.
*/
- if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq) {
- sk_rethink_txhash(sk);
+ if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq &&
+ sk_rethink_txhash(sk))
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH);
- }
}
static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 58207c7769d0..777306b5bc22 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1595,6 +1595,8 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
tcp_move_syn(newtp, req);
ireq->ireq_opt = NULL;
} else {
+ newinet->inet_opt = NULL;
+
if (!req_unhash && found_dup_sk) {
/* This code path should only be executed in the
* syncookie case only
@@ -1602,8 +1604,6 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
bh_unlock_sock(newsk);
sock_put(newsk);
newsk = NULL;
- } else {
- newinet->inet_opt = NULL;
}
}
return newsk;
@@ -1760,6 +1760,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
+ u32 tail_gso_size, tail_gso_segs;
struct skb_shared_info *shinfo;
const struct tcphdr *th;
struct tcphdr *thtail;
@@ -1767,6 +1768,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
unsigned int hdrlen;
bool fragstolen;
u32 gso_segs;
+ u32 gso_size;
int delta;
/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
@@ -1792,13 +1794,6 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
*/
th = (const struct tcphdr *)skb->data;
hdrlen = th->doff * 4;
- shinfo = skb_shinfo(skb);
-
- if (!shinfo->gso_size)
- shinfo->gso_size = skb->len - hdrlen;
-
- if (!shinfo->gso_segs)
- shinfo->gso_segs = 1;
tail = sk->sk_backlog.tail;
if (!tail)
@@ -1821,6 +1816,15 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
goto no_coalesce;
__skb_pull(skb, hdrlen);
+
+ shinfo = skb_shinfo(skb);
+ gso_size = shinfo->gso_size ?: skb->len;
+ gso_segs = shinfo->gso_segs ?: 1;
+
+ shinfo = skb_shinfo(tail);
+ tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
+ tail_gso_segs = shinfo->gso_segs ?: 1;
+
if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -1847,11 +1851,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
}
/* Not as strict as GRO. We only need to carry mss max value */
- skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
- skb_shinfo(tail)->gso_size);
-
- gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
- skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+ shinfo->gso_size = max(gso_size, tail_gso_size);
+ shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
sk->sk_backlog.len += delta;
__NET_INC_STATS(sock_net(sk),
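
Note on the backlog-coalescing hunks above: instead of writing default gso_size/gso_segs values back into the skb before the coalesce decision, the code now derives locals with a "?:" fallback and clamps the merged segment count to 0xFFFF (gso_segs is a 16-bit field). A userspace sketch of that arithmetic with made-up sizes, using the same GNU "?:" shorthand; names and values here are illustrative only:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* 0 means "not a GSO packet": fall back to payload length / 1 segment. */
        uint32_t skb_gso_size = 0, skb_len = 1400;
        uint32_t skb_gso_segs = 0, tail_gso_segs = 0xFFFE;

        uint32_t gso_size = skb_gso_size ?: skb_len;   /* 1400 */
        uint32_t gso_segs = skb_gso_segs ?: 1;         /* 1 */

        /* Clamp the sum so it still fits the 16-bit gso_segs field. */
        uint32_t sum = gso_segs + tail_gso_segs;
        uint32_t merged_segs = sum < 0xFFFF ? sum : 0xFFFF;

        printf("gso_size=%u gso_segs=%u merged_segs=0x%x\n",
               gso_size, gso_segs, merged_segs);
        return 0;
}
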
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f322e798a351..ab458697881e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -4084,6 +4084,7 @@ void tcp_send_probe0(struct sock *sk)
/* Cancel probe timer, if it is not required. */
icsk->icsk_probes_out = 0;
icsk->icsk_backoff = 0;
+ icsk->icsk_probes_tstamp = 0;
return;
}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 6c62b9ea1320..faa92948441b 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -219,14 +219,8 @@ static int tcp_write_timeout(struct sock *sk)
int retry_until;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
- if (icsk->icsk_retransmits) {
- dst_negative_advice(sk);
- } else {
- sk_rethink_txhash(sk);
- tp->timeout_rehash++;
- __NET_INC_STATS(sock_net(sk),
- LINUX_MIB_TCPTIMEOUTREHASH);
- }
+ if (icsk->icsk_retransmits)
+ __dst_negative_advice(sk);
retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
expired = icsk->icsk_retransmits >= retry_until;
} else {
@@ -234,12 +228,7 @@ static int tcp_write_timeout(struct sock *sk)
/* Black hole detection */
tcp_mtu_probing(icsk, sk);
- dst_negative_advice(sk);
- } else {
- sk_rethink_txhash(sk);
- tp->timeout_rehash++;
- __NET_INC_STATS(sock_net(sk),
- LINUX_MIB_TCPTIMEOUTREHASH);
+ __dst_negative_advice(sk);
}
retry_until = net->ipv4.sysctl_tcp_retries2;
@@ -270,6 +259,11 @@ static int tcp_write_timeout(struct sock *sk)
return 1;
}
+ if (sk_rethink_txhash(sk)) {
+ tp->timeout_rehash++;
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
+ }
+
return 0;
}
@@ -349,6 +343,7 @@ static void tcp_probe_timer(struct sock *sk)
if (tp->packets_out || !skb) {
icsk->icsk_probes_out = 0;
+ icsk->icsk_probes_tstamp = 0;
return;
}
@@ -360,13 +355,12 @@ static void tcp_probe_timer(struct sock *sk)
* corresponding system limit. We also implement similar policy when
* we use RTO to probe window in tcp_retransmit_timer().
*/
- if (icsk->icsk_user_timeout) {
- u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
- tcp_probe0_base(sk));
-
- if (elapsed >= icsk->icsk_user_timeout)
- goto abort;
- }
+ if (!icsk->icsk_probes_tstamp)
+ icsk->icsk_probes_tstamp = tcp_jiffies32;
+ else if (icsk->icsk_user_timeout &&
+ (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
+ msecs_to_jiffies(icsk->icsk_user_timeout))
+ goto abort;
max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) {
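
Note on the probe-timer hunks above: the user-timeout check now measures elapsed time from a recorded icsk_probes_tstamp, comparing the jiffies delta through a signed cast so counter wraparound is handled. A minimal sketch of that elapsed-time pattern, with plain 32-bit integers standing in for jiffies and a hypothetical timed_out() helper:

#include <stdio.h>
#include <stdint.h>

/* Wraparound-safe "have at least `timeout` ticks elapsed since `start`?" */
static int timed_out(uint32_t now, uint32_t start, uint32_t timeout)
{
        return (int32_t)(now - start) >= (int32_t)timeout;
}

int main(void)
{
        uint32_t start = 0xFFFFFFF0u;   /* counter about to wrap */
        uint32_t now   = 0x00000020u;   /* 0x30 ticks later, after the wrap */

        printf("elapsed >= 0x20? %d\n", timed_out(now, start, 0x20)); /* 1 */
        printf("elapsed >= 0x40? %d\n", timed_out(now, start, 0x40)); /* 0 */
        return 0;
}
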
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7103b0a89756..69ea76578abb 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2555,7 +2555,8 @@ int udp_v4_early_demux(struct sk_buff *skb)
*/
if (!inet_sk(sk)->inet_daddr && in_dev)
return ip_mc_validate_source(skb, iph->daddr,
- iph->saddr, iph->tos,
+ iph->saddr,
+ iph->tos & IPTOS_RT_MASK,
skb->dev, in_dev, &itag);
}
return 0;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index eff2cacd5209..9edc5bb2d531 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2467,8 +2467,9 @@ static void addrconf_add_mroute(struct net_device *dev)
.fc_ifindex = dev->ifindex,
.fc_dst_len = 8,
.fc_flags = RTF_UP,
- .fc_type = RTN_UNICAST,
+ .fc_type = RTN_MULTICAST,
.fc_nlinfo.nl_net = dev_net(dev),
+ .fc_protocol = RTPROT_KERNEL,
};
ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 52c2f063529f..2b804fcebcc6 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -478,7 +478,6 @@ static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
u8 *tail;
- u8 *vaddr;
int nfrags;
int esph_offset;
struct page *page;
@@ -519,14 +518,10 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
page = pfrag->page;
get_page(page);
- vaddr = kmap_atomic(page);
-
- tail = vaddr + pfrag->offset;
+ tail = page_address(page) + pfrag->offset;
esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
- kunmap_atomic(vaddr);
-
nfrags = skb_shinfo(skb)->nr_frags;
__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 605cdd38a919..f43e27555725 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1025,6 +1025,8 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
{
struct fib6_table *table = rt->fib6_table;
+ /* Flush all cached dst in exception table */
+ rt6_flush_exceptions(rt);
fib6_drop_pcpu_from(rt, table);
if (rt->nh && !list_empty(&rt->nh_list))
@@ -1927,9 +1929,6 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
net->ipv6.rt6_stats->fib_rt_entries--;
net->ipv6.rt6_stats->fib_discarded_routes++;
- /* Flush all cached dst in exception table */
- rt6_flush_exceptions(rt);
-
/* Reset round-robin state, if necessary */
if (rcu_access_pointer(fn->rr_ptr) == rt)
fn->rr_ptr = NULL;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 749ad72386b2..077d43af8226 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -125,8 +125,43 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
return -EINVAL;
}
+static int
+ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
+ struct sk_buff *skb, unsigned int mtu)
+{
+ struct sk_buff *segs, *nskb;
+ netdev_features_t features;
+ int ret = 0;
+
+ /* Please see corresponding comment in ip_finish_output_gso
+ * describing the cases where GSO segment length exceeds the
+ * egress MTU.
+ */
+ features = netif_skb_features(skb);
+ segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+ if (IS_ERR_OR_NULL(segs)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ consume_skb(skb);
+
+ skb_list_walk_safe(segs, segs, nskb) {
+ int err;
+
+ skb_mark_not_on_list(segs);
+ err = ip6_fragment(net, sk, segs, ip6_finish_output2);
+ if (err && ret == 0)
+ ret = err;
+ }
+
+ return ret;
+}
+
static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ unsigned int mtu;
+
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
/* Policy lookup after SNAT yielded a new policy */
if (skb_dst(skb)->xfrm) {
@@ -135,7 +170,11 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff
}
#endif
- if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+ mtu = ip6_skb_dst_mtu(skb);
+ if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))
+ return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu);
+
+ if ((skb->len > mtu && !skb_is_gso(skb)) ||
dst_allfrag(skb_dst(skb)) ||
(IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
return ip6_fragment(net, sk, skb, ip6_finish_output2);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index c4f532f4d311..0d453fa9e327 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1598,7 +1598,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
xt_compat_lock(AF_INET6);
t = xt_find_table_lock(net, AF_INET6, get.name);
if (!IS_ERR(t)) {
- const struct xt_table_info *private = t->private;
+ const struct xt_table_info *private = xt_table_get_private_protected(t);
struct xt_table_info info;
ret = compat_table_info(private, &info);
if (!ret && get.size == info.size)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 2da0ee703779..93636867aee2 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1645,8 +1645,11 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
}
#ifdef CONFIG_IPV6_SIT_6RD
- if (ipip6_netlink_6rd_parms(data, &ip6rd))
+ if (ipip6_netlink_6rd_parms(data, &ip6rd)) {
err = ipip6_tunnel_update_6rd(nt, &ip6rd);
+ if (err < 0)
+ unregister_netdevice_queue(dev, NULL);
+ }
#endif
return err;
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index 213ea7abc9ab..40961889e9c0 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -489,6 +489,7 @@ static int lapb_device_event(struct notifier_block *this, unsigned long event,
break;
}
+ lapb_put(lapb);
return NOTIFY_DONE;
}
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 48f144f107d5..9e723d943421 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -120,18 +120,17 @@ static ssize_t aqm_write(struct file *file,
{
struct ieee80211_local *local = file->private_data;
char buf[100];
- size_t len;
- if (count > sizeof(buf))
+ if (count >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
- buf[sizeof(buf) - 1] = '\0';
- len = strlen(buf);
- if (len > 0 && buf[len-1] == '\n')
- buf[len-1] = 0;
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
if (sscanf(buf, "fq_limit %u", &local->fq.limit) == 1)
return count;
@@ -177,18 +176,17 @@ static ssize_t airtime_flags_write(struct file *file,
{
struct ieee80211_local *local = file->private_data;
char buf[16];
- size_t len;
- if (count > sizeof(buf))
+ if (count >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
- buf[sizeof(buf) - 1] = 0;
- len = strlen(buf);
- if (len > 0 && buf[len - 1] == '\n')
- buf[len - 1] = 0;
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
if (kstrtou16(buf, 0, &local->airtime_flags))
return -EINVAL;
@@ -237,20 +235,19 @@ static ssize_t aql_txq_limit_write(struct file *file,
{
struct ieee80211_local *local = file->private_data;
char buf[100];
- size_t len;
u32 ac, q_limit_low, q_limit_high, q_limit_low_old, q_limit_high_old;
struct sta_info *sta;
- if (count > sizeof(buf))
+ if (count >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
- buf[sizeof(buf) - 1] = 0;
- len = strlen(buf);
- if (len > 0 && buf[len - 1] == '\n')
- buf[len - 1] = 0;
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
if (sscanf(buf, "%u %u %u", &ac, &q_limit_low, &q_limit_high) != 3)
return -EINVAL;
@@ -306,18 +303,17 @@ static ssize_t force_tx_status_write(struct file *file,
{
struct ieee80211_local *local = file->private_data;
char buf[3];
- size_t len;
- if (count > sizeof(buf))
+ if (count >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
- buf[sizeof(buf) - 1] = '\0';
- len = strlen(buf);
- if (len > 0 && buf[len - 1] == '\n')
- buf[len - 1] = 0;
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
if (buf[0] == '0' && buf[1] == '\0')
local->force_tx_status = 0;
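
Note on the four debugfs write handlers above: they all move to the same pattern — reject count >= sizeof(buf) so there is always room for a terminator, then NUL-terminate at count (replacing a trailing newline if present) instead of running strlen() over a possibly unterminated buffer. A userspace sketch of that pattern with a hypothetical parse_write() handler (memcpy stands in for copy_from_user):

#include <stdio.h>
#include <string.h>

static int parse_write(const char *user, size_t count)
{
        char buf[16];
        unsigned int val;

        if (count >= sizeof(buf))   /* leave room for the '\0' */
                return -1;

        memcpy(buf, user, count);   /* stands in for copy_from_user() */

        if (count && buf[count - 1] == '\n')
                buf[count - 1] = '\0';
        else
                buf[count] = '\0';

        if (sscanf(buf, "limit %u", &val) == 1)
                printf("limit = %u\n", val);
        return 0;
}

int main(void)
{
        const char msg[] = "limit 42\n";

        return parse_write(msg, sizeof(msg) - 1);
}
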
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 13b9bcc4865d..972895e9f22d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -4176,6 +4176,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
rcu_read_lock();
key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+ if (!key)
+ key = rcu_dereference(sdata->default_unicast_key);
if (key) {
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_TKIP:
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 6422da6690f7..ebb3228ce971 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -649,7 +649,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
if (!skip_hw && tx->key &&
tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
info->control.hw_key = &tx->key->conf;
- } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta &&
+ } else if (ieee80211_is_data_present(hdr->frame_control) && tx->sta &&
test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
return TX_DROP;
}
@@ -3809,7 +3809,7 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
* get immediately moved to the back of the list on the next
* call to ieee80211_next_txq().
*/
- if (txqi->txq.sta &&
+ if (txqi->txq.sta && local->airtime_flags &&
wiphy_ext_feature_isset(local->hw.wiphy,
NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
list_add(&txqi->schedule_order,
@@ -4251,7 +4251,6 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
struct ethhdr *ehdr = (struct ethhdr *)skb->data;
struct ieee80211_key *key;
struct sta_info *sta;
- bool offload = true;
if (unlikely(skb->len < ETH_HLEN)) {
kfree_skb(skb);
@@ -4267,18 +4266,22 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded ||
!test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
- sdata->control_port_protocol == ehdr->h_proto))
- offload = false;
- else if ((key = rcu_dereference(sta->ptk[sta->ptk_idx])) &&
- (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
- key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
- offload = false;
-
- if (offload)
- ieee80211_8023_xmit(sdata, dev, sta, key, skb);
- else
- ieee80211_subif_start_xmit(skb, dev);
+ sdata->control_port_protocol == ehdr->h_proto))
+ goto skip_offload;
+
+ key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+ if (!key)
+ key = rcu_dereference(sdata->default_unicast_key);
+
+ if (key && (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
+ key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
+ goto skip_offload;
+
+ ieee80211_8023_xmit(sdata, dev, sta, key, skb);
+ goto out;
+skip_offload:
+ ieee80211_subif_start_xmit(skb, dev);
out:
rcu_read_unlock();
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 09b19aa2f205..f998a077c7dd 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -427,7 +427,7 @@ static bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
static bool tcp_can_send_ack(const struct sock *ssk)
{
return !((1 << inet_sk_state_load(ssk)) &
- (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE));
+ (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
}
static void mptcp_send_ack(struct mptcp_sock *msk)
@@ -877,6 +877,9 @@ static void __mptcp_wmem_reserve(struct sock *sk, int size)
struct mptcp_sock *msk = mptcp_sk(sk);
WARN_ON_ONCE(msk->wmem_reserved);
+ if (WARN_ON_ONCE(amount < 0))
+ amount = 0;
+
if (amount <= sk->sk_forward_alloc)
goto reserve;
@@ -1587,7 +1590,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
return -EOPNOTSUPP;
- mptcp_lock_sock(sk, __mptcp_wmem_reserve(sk, len));
+ mptcp_lock_sock(sk, __mptcp_wmem_reserve(sk, min_t(size_t, 1 << 20, len)));
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
@@ -2639,11 +2642,17 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
static int mptcp_disconnect(struct sock *sk, int flags)
{
- /* Should never be called.
- * inet_stream_connect() calls ->disconnect, but that
- * refers to the subflow socket, not the mptcp one.
- */
- WARN_ON_ONCE(1);
+ struct mptcp_subflow_context *subflow;
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ __mptcp_flush_join_list(msk);
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ lock_sock(ssk);
+ tcp_disconnect(ssk, flags);
+ release_sock(ssk);
+ }
return 0;
}
@@ -3086,6 +3095,14 @@ bool mptcp_finish_join(struct sock *ssk)
return true;
}
+static void mptcp_shutdown(struct sock *sk, int how)
+{
+ pr_debug("sk=%p, how=%d", sk, how);
+
+ if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
+ __mptcp_wr_shutdown(sk);
+}
+
static struct proto mptcp_prot = {
.name = "MPTCP",
.owner = THIS_MODULE,
@@ -3095,7 +3112,7 @@ static struct proto mptcp_prot = {
.accept = mptcp_accept,
.setsockopt = mptcp_setsockopt,
.getsockopt = mptcp_getsockopt,
- .shutdown = tcp_shutdown,
+ .shutdown = mptcp_shutdown,
.destroy = mptcp_destroy,
.sendmsg = mptcp_sendmsg,
.recvmsg = mptcp_recvmsg,
@@ -3341,43 +3358,6 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
return mask;
}
-static int mptcp_shutdown(struct socket *sock, int how)
-{
- struct mptcp_sock *msk = mptcp_sk(sock->sk);
- struct sock *sk = sock->sk;
- int ret = 0;
-
- pr_debug("sk=%p, how=%d", msk, how);
-
- lock_sock(sk);
-
- how++;
- if ((how & ~SHUTDOWN_MASK) || !how) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- if (sock->state == SS_CONNECTING) {
- if ((1 << sk->sk_state) &
- (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
- sock->state = SS_DISCONNECTING;
- else
- sock->state = SS_CONNECTED;
- }
-
- sk->sk_shutdown |= how;
- if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
- __mptcp_wr_shutdown(sk);
-
- /* Wake up anyone sleeping in poll. */
- sk->sk_state_change(sk);
-
-out_unlock:
- release_sock(sk);
-
- return ret;
-}
-
static const struct proto_ops mptcp_stream_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
@@ -3391,7 +3371,7 @@ static const struct proto_ops mptcp_stream_ops = {
.ioctl = inet_ioctl,
.gettstamp = sock_gettstamp,
.listen = mptcp_listen,
- .shutdown = mptcp_shutdown,
+ .shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
@@ -3441,7 +3421,7 @@ static const struct proto_ops mptcp_v6_stream_ops = {
.ioctl = inet6_ioctl,
.gettstamp = sock_gettstamp,
.listen = mptcp_listen,
- .shutdown = mptcp_shutdown,
+ .shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet6_sendmsg,
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 5b1f4ec66dd9..888ccc2d4e34 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -1120,7 +1120,7 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
int payload, i, ret;
/* Find the NCSI device */
- nd = ncsi_find_dev(dev);
+ nd = ncsi_find_dev(orig_dev);
ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
if (!ndp)
return -ENODEV;
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 5f1208ad049e..6186358eac7c 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -141,20 +141,6 @@ htable_size(u8 hbits)
return hsize * sizeof(struct hbucket *) + sizeof(struct htable);
}
-/* Compute htable_bits from the user input parameter hashsize */
-static u8
-htable_bits(u32 hashsize)
-{
- /* Assume that hashsize == 2^htable_bits */
- u8 bits = fls(hashsize - 1);
-
- if (jhash_size(bits) != hashsize)
- /* Round up to the first 2^n value */
- bits = fls(hashsize);
-
- return bits;
-}
-
#ifdef IP_SET_HASH_WITH_NETS
#if IPSET_NET_COUNT > 1
#define __CIDR(cidr, i) (cidr[i])
@@ -640,7 +626,7 @@ mtype_resize(struct ip_set *set, bool retried)
struct htype *h = set->data;
struct htable *t, *orig;
u8 htable_bits;
- size_t dsize = set->dsize;
+ size_t hsize, dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
u8 flags;
struct mtype_elem *tmp;
@@ -664,14 +650,12 @@ mtype_resize(struct ip_set *set, bool retried)
retry:
ret = 0;
htable_bits++;
- if (!htable_bits) {
- /* In case we have plenty of memory :-) */
- pr_warn("Cannot increase the hashsize of set %s further\n",
- set->name);
- ret = -IPSET_ERR_HASH_FULL;
- goto out;
- }
- t = ip_set_alloc(htable_size(htable_bits));
+ if (!htable_bits)
+ goto hbwarn;
+ hsize = htable_size(htable_bits);
+ if (!hsize)
+ goto hbwarn;
+ t = ip_set_alloc(hsize);
if (!t) {
ret = -ENOMEM;
goto out;
@@ -813,6 +797,12 @@ cleanup:
if (ret == -EAGAIN)
goto retry;
goto out;
+
+hbwarn:
+ /* In case we have plenty of memory :-) */
+ pr_warn("Cannot increase the hashsize of set %s further\n", set->name);
+ ret = -IPSET_ERR_HASH_FULL;
+ goto out;
}
/* Get the current number of elements and ext_size in the set */
@@ -1521,7 +1511,11 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
if (!h)
return -ENOMEM;
- hbits = htable_bits(hashsize);
+ /* Compute htable_bits from the user input parameter hashsize.
+ * Assume that hashsize == 2^htable_bits,
+ * otherwise round up to the first 2^n value.
+ */
+ hbits = fls(hashsize - 1);
hsize = htable_size(hbits);
if (hsize == 0) {
kfree(h);
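
Note on the ipset hunk above: the dedicated htable_bits() helper is gone and, per the new comment, the table size is derived directly as fls(hashsize - 1), i.e. the user-supplied hashsize rounded up to the next power of two. A quick userspace check of that identity, using __builtin_clz as a stand-in for the kernel's fls():

#include <stdio.h>

/* fls(x): 1-based index of the highest set bit; fls(0) == 0, like the kernel's. */
static int fls_(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        unsigned int sizes[] = { 1, 2, 3, 1000, 1024, 1025 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                unsigned int hashsize = sizes[i];
                int bits = fls_(hashsize - 1);

                printf("hashsize=%u -> htable_bits=%d (2^bits=%u)\n",
                       hashsize, bits, 1u << bits);
        }
        return 0;
}
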
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 46c5557c1fec..0ee702d374b0 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -523,6 +523,9 @@ nf_conntrack_hash_sysctl(struct ctl_table *table, int write,
{
int ret;
+ /* module_param hashsize could have changed value */
+ nf_conntrack_htable_size_user = nf_conntrack_htable_size;
+
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret < 0 || !write)
return ret;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index ea923f8cf9c4..b7c3c902290f 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -1174,6 +1174,7 @@ static int __init nf_nat_init(void)
ret = register_pernet_subsys(&nat_net_ops);
if (ret < 0) {
nf_ct_extend_unregister(&nat_extend);
+ kvfree(nf_nat_bysource);
return ret;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 8d5aa0ac45f4..15c467f1a9dd 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4162,7 +4162,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
if (flags & ~(NFT_SET_ANONYMOUS | NFT_SET_CONSTANT |
NFT_SET_INTERVAL | NFT_SET_TIMEOUT |
NFT_SET_MAP | NFT_SET_EVAL |
- NFT_SET_OBJECT | NFT_SET_CONCAT))
+ NFT_SET_OBJECT | NFT_SET_CONCAT | NFT_SET_EXPR))
return -EOPNOTSUPP;
/* Only one of these operations is supported */
if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
@@ -4304,6 +4304,10 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
struct nlattr *tmp;
int left;
+ if (!(flags & NFT_SET_EXPR)) {
+ err = -EINVAL;
+ goto err_set_alloc_name;
+ }
i = 0;
nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
if (i == NFT_SET_EXPR_MAX) {
@@ -5254,8 +5258,8 @@ static int nft_set_elem_expr_clone(const struct nft_ctx *ctx,
return 0;
err_expr:
- for (k = i - 1; k >= 0; k++)
- nft_expr_destroy(ctx, expr_array[i]);
+ for (k = i - 1; k >= 0; k--)
+ nft_expr_destroy(ctx, expr_array[k]);
return -ENOMEM;
}
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 983a1d5ca3ab..0b053f75cd60 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -19,6 +19,7 @@ struct nft_dynset {
enum nft_registers sreg_key:8;
enum nft_registers sreg_data:8;
bool invert;
+ bool expr;
u8 num_exprs;
u64 timeout;
struct nft_expr *expr_array[NFT_SET_EXPR_MAX];
@@ -175,11 +176,12 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
if (tb[NFTA_DYNSET_FLAGS]) {
u32 flags = ntohl(nla_get_be32(tb[NFTA_DYNSET_FLAGS]));
-
- if (flags & ~NFT_DYNSET_F_INV)
- return -EINVAL;
+ if (flags & ~(NFT_DYNSET_F_INV | NFT_DYNSET_F_EXPR))
+ return -EOPNOTSUPP;
if (flags & NFT_DYNSET_F_INV)
priv->invert = true;
+ if (flags & NFT_DYNSET_F_EXPR)
+ priv->expr = true;
}
set = nft_set_lookup_global(ctx->net, ctx->table,
@@ -210,7 +212,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
timeout = 0;
if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
- return -EINVAL;
+ return -EOPNOTSUPP;
err = nf_msecs_to_jiffies64(tb[NFTA_DYNSET_TIMEOUT], &timeout);
if (err)
@@ -224,7 +226,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
if (tb[NFTA_DYNSET_SREG_DATA] != NULL) {
if (!(set->flags & NFT_SET_MAP))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (set->dtype == NFT_DATA_VERDICT)
return -EOPNOTSUPP;
@@ -261,6 +263,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
struct nlattr *tmp;
int left;
+ if (!priv->expr)
+ return -EINVAL;
+
i = 0;
nla_for_each_nested(tmp, tb[NFTA_DYNSET_EXPRESSIONS], left) {
if (i == NFT_SET_EXPR_MAX) {
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 37253d399c6b..0d5c422f8745 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -115,6 +115,9 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
} cfg;
int ret;
+ if (strnlen(info->name, sizeof(est->name)) >= sizeof(est->name))
+ return -ENAMETOOLONG;
+
net_get_random_once(&jhash_rnd, sizeof(jhash_rnd));
mutex_lock(&xn->hash_lock);
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index e64727e1a72f..02a1f13f0798 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -508,7 +508,7 @@ static int nci_open_device(struct nci_dev *ndev)
};
unsigned long opt = 0;
- if (!(ndev->nci_ver & NCI_VER_2_MASK))
+ if (ndev->nci_ver & NCI_VER_2_MASK)
opt = (unsigned long)&nci_init_v2_cmd;
rc = __nci_request(ndev, nci_init_req, opt,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index de8e8dbbdeb8..6bbc7a448593 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4595,7 +4595,9 @@ static void packet_seq_stop(struct seq_file *seq, void *v)
static int packet_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
- seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
+ seq_printf(seq,
+ "%*sRefCnt Type Proto Iface R Rmem User Inode\n",
+ IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
else {
struct sock *s = sk_entry(v);
const struct packet_sock *po = pkt_sk(s);
diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
index 56aaf8cb6527..8d00dfe8139e 100644
--- a/net/qrtr/ns.c
+++ b/net/qrtr/ns.c
@@ -755,7 +755,7 @@ static void qrtr_ns_data_ready(struct sock *sk)
queue_work(qrtr_ns.workqueue, &qrtr_ns.work);
}
-void qrtr_ns_init(void)
+int qrtr_ns_init(void)
{
struct sockaddr_qrtr sq;
int ret;
@@ -766,7 +766,7 @@ void qrtr_ns_init(void)
ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM,
PF_QIPCRTR, &qrtr_ns.sock);
if (ret < 0)
- return;
+ return ret;
ret = kernel_getsockname(qrtr_ns.sock, (struct sockaddr *)&sq);
if (ret < 0) {
@@ -797,12 +797,13 @@ void qrtr_ns_init(void)
if (ret < 0)
goto err_wq;
- return;
+ return 0;
err_wq:
destroy_workqueue(qrtr_ns.workqueue);
err_sock:
sock_release(qrtr_ns.sock);
+ return ret;
}
EXPORT_SYMBOL_GPL(qrtr_ns_init);
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index f4ab3ca6d73b..b34358282f37 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -1287,13 +1287,19 @@ static int __init qrtr_proto_init(void)
return rc;
rc = sock_register(&qrtr_family);
- if (rc) {
- proto_unregister(&qrtr_proto);
- return rc;
- }
+ if (rc)
+ goto err_proto;
- qrtr_ns_init();
+ rc = qrtr_ns_init();
+ if (rc)
+ goto err_sock;
+ return 0;
+
+err_sock:
+ sock_unregister(qrtr_family.family);
+err_proto:
+ proto_unregister(&qrtr_proto);
return rc;
}
postcore_initcall(qrtr_proto_init);
diff --git a/net/qrtr/qrtr.h b/net/qrtr/qrtr.h
index dc2b67f17927..3f2d28696062 100644
--- a/net/qrtr/qrtr.h
+++ b/net/qrtr/qrtr.h
@@ -29,7 +29,7 @@ void qrtr_endpoint_unregister(struct qrtr_endpoint *ep);
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len);
-void qrtr_ns_init(void);
+int qrtr_ns_init(void);
void qrtr_ns_remove(void);
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 667c44aa5a63..dc201363f2c4 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -430,7 +430,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
return;
}
- if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
+ if (state == RXRPC_CALL_SERVER_RECV_REQUEST) {
unsigned long timo = READ_ONCE(call->next_req_timo);
unsigned long now, expect_req_by;
diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
index 9631aa8543b5..8d2073e0e3da 100644
--- a/net/rxrpc/key.c
+++ b/net/rxrpc/key.c
@@ -598,7 +598,7 @@ static long rxrpc_read(const struct key *key,
default: /* we have a ticket we can't encode */
pr_err("Unsupported key token type (%u)\n",
token->security_index);
- continue;
+ return -ENOPKG;
}
_debug("token[%u]: toksize=%u", ntoks, toksize);
@@ -674,7 +674,9 @@ static long rxrpc_read(const struct key *key,
break;
default:
- break;
+ pr_err("Unsupported key token type (%u)\n",
+ token->security_index);
+ return -ENOPKG;
}
ASSERTCMP((unsigned long)xdr - (unsigned long)oldxdr, ==,
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 1319986693fc..84f932532db7 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1272,6 +1272,10 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
+ if (!nla_ok(nla_opt_msk, msk_depth)) {
+ NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
+ return -EINVAL;
+ }
}
nla_for_each_attr(nla_opt_key, nla_enc_key,
@@ -1307,9 +1311,6 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
return -EINVAL;
}
-
- if (msk_depth)
- nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
break;
case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
if (key->enc_opts.dst_opt_type) {
@@ -1340,9 +1341,6 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
return -EINVAL;
}
-
- if (msk_depth)
- nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
break;
case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
if (key->enc_opts.dst_opt_type) {
@@ -1373,14 +1371,20 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
return -EINVAL;
}
-
- if (msk_depth)
- nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
break;
default:
NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
return -EINVAL;
}
+
+ if (!msk_depth)
+ continue;
+
+ if (!nla_ok(nla_opt_msk, msk_depth)) {
+ NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
+ return -EINVAL;
+ }
+ nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
}
return 0;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 78bec347b8b6..c4007b9cd16d 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -366,9 +366,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
if (tb[TCA_TCINDEX_MASK])
cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
- if (tb[TCA_TCINDEX_SHIFT])
+ if (tb[TCA_TCINDEX_SHIFT]) {
cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
-
+ if (cp->shift > 16) {
+ err = -EINVAL;
+ goto errout;
+ }
+ }
if (!cp->hash) {
/* Hash not specified, use perfect hash if the upper limit
* of the hashing index is below the threshold.
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 51cb553e4317..6fe4e5cc807c 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -412,7 +412,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
{
struct qdisc_rate_table *rtab;
- if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
+ if (tab == NULL || r->rate == 0 ||
+ r->cell_log == 0 || r->cell_log >= 32 ||
nla_len(tab) != TC_RTAB_SIZE) {
NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
return NULL;
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index bd618b00d319..50f680f03a54 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -362,7 +362,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
ctl = nla_data(tb[TCA_CHOKE_PARMS]);
- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
return -EINVAL;
if (ctl->limit > CHOKE_MAX_QUEUE)
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 8599c6f31b05..e0bc77533acc 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -480,7 +480,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
struct gred_sched *table = qdisc_priv(sch);
struct gred_sched_data *q = table->tab[dp];
- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) {
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) {
NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
return -EINVAL;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index e89fab6ccb34..b4ae34d7aa96 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -250,7 +250,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
ctl = nla_data(tb[TCA_RED_PARMS]);
- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
return -EINVAL;
err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index bca2be57d9fc..b25e51440623 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -647,7 +647,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
}
if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
- ctl_v1->Wlog))
+ ctl_v1->Wlog, ctl_v1->Scell_log))
return -EINVAL;
if (ctl_v1 && ctl_v1->qth_min) {
p = kmalloc(sizeof(*p), GFP_KERNEL);
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index c74817ec9964..6f775275826a 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1605,8 +1605,9 @@ static void taprio_reset(struct Qdisc *sch)
hrtimer_cancel(&q->advance_timer);
if (q->qdiscs) {
- for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
- qdisc_reset(q->qdiscs[i]);
+ for (i = 0; i < dev->num_tx_queues; i++)
+ if (q->qdiscs[i])
+ qdisc_reset(q->qdiscs[i]);
}
sch->qstats.backlog = 0;
sch->q.qlen = 0;
@@ -1626,7 +1627,7 @@ static void taprio_destroy(struct Qdisc *sch)
taprio_disable_offload(dev, q, NULL);
if (q->qdiscs) {
- for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
+ for (i = 0; i < dev->num_tx_queues; i++)
qdisc_put(q->qdiscs[i]);
kfree(q->qdiscs);
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 59342b519e34..0df85a12651e 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -246,7 +246,8 @@ int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
goto errattr;
smc_clc_get_hostname(&host);
if (host) {
- snprintf(hostname, sizeof(hostname), "%s", host);
+ memcpy(hostname, host, SMC_MAX_HOSTNAME_LEN);
+ hostname[SMC_MAX_HOSTNAME_LEN] = 0;
if (nla_put_string(skb, SMC_NLA_SYS_LOCAL_HOST, hostname))
goto errattr;
}
@@ -257,7 +258,8 @@ int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
smc_ism_get_system_eid(smcd_dev, &seid);
mutex_unlock(&smcd_dev_list.mutex);
if (seid && smc_ism_is_v2_capable()) {
- snprintf(smc_seid, sizeof(smc_seid), "%s", seid);
+ memcpy(smc_seid, seid, SMC_MAX_EID_LEN);
+ smc_seid[SMC_MAX_EID_LEN] = 0;
if (nla_put_string(skb, SMC_NLA_SYS_SEID, smc_seid))
goto errattr;
}
@@ -295,7 +297,8 @@ static int smc_nl_fill_lgr(struct smc_link_group *lgr,
goto errattr;
if (nla_put_u8(skb, SMC_NLA_LGR_R_VLAN_ID, lgr->vlan_id))
goto errattr;
- snprintf(smc_target, sizeof(smc_target), "%s", lgr->pnet_id);
+ memcpy(smc_target, lgr->pnet_id, SMC_MAX_PNETID_LEN);
+ smc_target[SMC_MAX_PNETID_LEN] = 0;
if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target))
goto errattr;
@@ -312,7 +315,7 @@ static int smc_nl_fill_lgr_link(struct smc_link_group *lgr,
struct sk_buff *skb,
struct netlink_callback *cb)
{
- char smc_ibname[IB_DEVICE_NAME_MAX + 1];
+ char smc_ibname[IB_DEVICE_NAME_MAX];
u8 smc_gid_target[41];
struct nlattr *attrs;
u32 link_uid = 0;
@@ -461,7 +464,8 @@ static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
goto errattr;
if (nla_put_u32(skb, SMC_NLA_LGR_D_CHID, smc_ism_get_chid(lgr->smcd)))
goto errattr;
- snprintf(smc_pnet, sizeof(smc_pnet), "%s", lgr->smcd->pnetid);
+ memcpy(smc_pnet, lgr->smcd->pnetid, SMC_MAX_PNETID_LEN);
+ smc_pnet[SMC_MAX_PNETID_LEN] = 0;
if (nla_put_string(skb, SMC_NLA_LGR_D_PNETID, smc_pnet))
goto errattr;
@@ -474,10 +478,12 @@ static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
goto errv2attr;
if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os))
goto errv2attr;
- snprintf(smc_host, sizeof(smc_host), "%s", lgr->peer_hostname);
+ memcpy(smc_host, lgr->peer_hostname, SMC_MAX_HOSTNAME_LEN);
+ smc_host[SMC_MAX_HOSTNAME_LEN] = 0;
if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host))
goto errv2attr;
- snprintf(smc_eid, sizeof(smc_eid), "%s", lgr->negotiated_eid);
+ memcpy(smc_eid, lgr->negotiated_eid, SMC_MAX_EID_LEN);
+ smc_eid[SMC_MAX_EID_LEN] = 0;
if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid))
goto errv2attr;
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index ddd7fac98b1d..7d7ba0320d5a 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -371,8 +371,8 @@ static int smc_nl_handle_dev_port(struct sk_buff *skb,
if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR,
smcibdev->pnetid_by_user[port]))
goto errattr;
- snprintf(smc_pnet, sizeof(smc_pnet), "%s",
- (char *)&smcibdev->pnetid[port]);
+ memcpy(smc_pnet, &smcibdev->pnetid[port], SMC_MAX_PNETID_LEN);
+ smc_pnet[SMC_MAX_PNETID_LEN] = 0;
if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet))
goto errattr;
if (nla_put_u32(skb, SMC_NLA_DEV_PORT_NETDEV,
@@ -414,7 +414,7 @@ static int smc_nl_handle_smcr_dev(struct smc_ib_device *smcibdev,
struct sk_buff *skb,
struct netlink_callback *cb)
{
- char smc_ibname[IB_DEVICE_NAME_MAX + 1];
+ char smc_ibname[IB_DEVICE_NAME_MAX];
struct smc_pci_dev smc_pci_dev;
struct pci_dev *pci_dev;
unsigned char is_crit;
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index 524ef64a191a..9c6e95882553 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -250,7 +250,8 @@ static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd,
goto errattr;
if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR, smcd->pnetid_by_user))
goto errportattr;
- snprintf(smc_pnet, sizeof(smc_pnet), "%s", smcd->pnetid);
+ memcpy(smc_pnet, smcd->pnetid, SMC_MAX_PNETID_LEN);
+ smc_pnet[SMC_MAX_PNETID_LEN] = 0;
if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet))
goto errportattr;
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 010dcb876f9d..6e4dbd577a39 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -185,7 +185,7 @@ static int rpc_parse_scope_id(struct net *net, const char *buf,
scope_id = dev->ifindex;
dev_put(dev);
} else {
- if (kstrtou32(p, 10, &scope_id) == 0) {
+ if (kstrtou32(p, 10, &scope_id) != 0) {
kfree(p);
return 0;
}
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 5fb9164aa690..dcc50ae54550 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -857,6 +857,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
err = -EAGAIN;
if (len <= 0)
goto out_release;
+ trace_svc_xdr_recvfrom(&rqstp->rq_arg);
clear_bit(XPT_OLD, &xprt->xpt_flags);
@@ -866,7 +867,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
if (serv->sv_stats)
serv->sv_stats->netcnt++;
- trace_svc_xdr_recvfrom(rqstp, &rqstp->rq_arg);
return len;
out_release:
rqstp->rq_res.len = 0;
@@ -904,7 +904,7 @@ int svc_send(struct svc_rqst *rqstp)
xb->len = xb->head[0].iov_len +
xb->page_len +
xb->tail[0].iov_len;
- trace_svc_xdr_sendto(rqstp, xb);
+ trace_svc_xdr_sendto(rqstp->rq_xid, xb);
trace_svc_stats_latency(rqstp);
len = xprt->xpt_ops->xpo_sendto(rqstp);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index b248f2349437..c9766d07eb81 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1062,6 +1062,90 @@ err_noclose:
return 0; /* record not complete */
}
+static int svc_tcp_send_kvec(struct socket *sock, const struct kvec *vec,
+ int flags)
+{
+ return kernel_sendpage(sock, virt_to_page(vec->iov_base),
+ offset_in_page(vec->iov_base),
+ vec->iov_len, flags);
+}
+
+/*
+ * kernel_sendpage() is used exclusively to reduce the number of
+ * copy operations in this path. Therefore the caller must ensure
+ * that the pages backing @xdr are unchanging.
+ *
+ * In addition, the logic assumes that * .bv_len is never larger
+ * than PAGE_SIZE.
+ */
+static int svc_tcp_sendmsg(struct socket *sock, struct msghdr *msg,
+ struct xdr_buf *xdr, rpc_fraghdr marker,
+ unsigned int *sentp)
+{
+ const struct kvec *head = xdr->head;
+ const struct kvec *tail = xdr->tail;
+ struct kvec rm = {
+ .iov_base = &marker,
+ .iov_len = sizeof(marker),
+ };
+ int flags, ret;
+
+ *sentp = 0;
+ xdr_alloc_bvec(xdr, GFP_KERNEL);
+
+ msg->msg_flags = MSG_MORE;
+ ret = kernel_sendmsg(sock, msg, &rm, 1, rm.iov_len);
+ if (ret < 0)
+ return ret;
+ *sentp += ret;
+ if (ret != rm.iov_len)
+ return -EAGAIN;
+
+ flags = head->iov_len < xdr->len ? MSG_MORE | MSG_SENDPAGE_NOTLAST : 0;
+ ret = svc_tcp_send_kvec(sock, head, flags);
+ if (ret < 0)
+ return ret;
+ *sentp += ret;
+ if (ret != head->iov_len)
+ goto out;
+
+ if (xdr->page_len) {
+ unsigned int offset, len, remaining;
+ struct bio_vec *bvec;
+
+ bvec = xdr->bvec;
+ offset = xdr->page_base;
+ remaining = xdr->page_len;
+ flags = MSG_MORE | MSG_SENDPAGE_NOTLAST;
+ while (remaining > 0) {
+ if (remaining <= PAGE_SIZE && tail->iov_len == 0)
+ flags = 0;
+ len = min(remaining, bvec->bv_len);
+ ret = kernel_sendpage(sock, bvec->bv_page,
+ bvec->bv_offset + offset,
+ len, flags);
+ if (ret < 0)
+ return ret;
+ *sentp += ret;
+ if (ret != len)
+ goto out;
+ remaining -= len;
+ offset = 0;
+ bvec++;
+ }
+ }
+
+ if (tail->iov_len) {
+ ret = svc_tcp_send_kvec(sock, tail, 0);
+ if (ret < 0)
+ return ret;
+ *sentp += ret;
+ }
+
+out:
+ return 0;
+}
+
/**
* svc_tcp_sendto - Send out a reply on a TCP socket
* @rqstp: completed svc_rqst
@@ -1089,7 +1173,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
mutex_lock(&xprt->xpt_mutex);
if (svc_xprt_is_dead(xprt))
goto out_notconn;
- err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, marker, &sent);
+ err = svc_tcp_sendmsg(svsk->sk_sock, &msg, xdr, marker, &sent);
xdr_free_bvec(xdr);
trace_svcsock_tcp_send(xprt, err < 0 ? err : sent);
if (err < 0 || sent != (xdr->len + sizeof(marker)))
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 6ae2140eb4f7..115109259430 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1030,7 +1030,6 @@ void tipc_link_reset(struct tipc_link *l)
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
struct sk_buff_head *xmitq)
{
- struct tipc_msg *hdr = buf_msg(skb_peek(list));
struct sk_buff_head *backlogq = &l->backlogq;
struct sk_buff_head *transmq = &l->transmq;
struct sk_buff *skb, *_skb;
@@ -1038,13 +1037,18 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
u16 ack = l->rcv_nxt - 1;
u16 seqno = l->snd_nxt;
int pkt_cnt = skb_queue_len(list);
- int imp = msg_importance(hdr);
unsigned int mss = tipc_link_mss(l);
unsigned int cwin = l->window;
unsigned int mtu = l->mtu;
+ struct tipc_msg *hdr;
bool new_bundle;
int rc = 0;
+ int imp;
+
+ if (pkt_cnt <= 0)
+ return 0;
+ hdr = buf_msg(skb_peek(list));
if (unlikely(msg_size(hdr) > mtu)) {
pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
skb_queue_len(list), msg_user(hdr),
@@ -1053,6 +1057,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
return -EMSGSIZE;
}
+ imp = msg_importance(hdr);
/* Allow oversubscription of one data msg per source at congestion */
if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
if (imp == TIPC_SYSTEM_IMPORTANCE) {
@@ -2539,7 +2544,7 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
}
/**
- * link_reset_stats - reset link statistics
+ * tipc_link_reset_stats - reset link statistics
* @l: pointer to link
*/
void tipc_link_reset_stats(struct tipc_link *l)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 83d9eb830592..008670d1f43e 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1665,7 +1665,7 @@ static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
}
/**
- * tipc_node_xmit() is the general link level function for message sending
+ * tipc_node_xmit() - general link level function for message sending
* @net: the applicable net namespace
* @list: chain of buffers containing message
* @dnode: address of destination node
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 27026f587fa6..f620acd2a0f5 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -21,6 +21,7 @@ config CFG80211
tristate "cfg80211 - wireless configuration API"
depends on RFKILL || !RFKILL
select FW_LOADER
+ select CRC32
# may need to update this when certificates are changed and are
# using a different algorithm, though right now they shouldn't
# (this is here rather than below to allow it to be a module)
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index bb72447ad960..8114bba8556c 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -5,7 +5,7 @@
* Copyright 2008-2011 Luis R. Rodriguez <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -139,6 +139,11 @@ static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
return rcu_dereference_rtnl(cfg80211_regdomain);
}
+/*
+ * Returns the regulatory domain associated with the wiphy.
+ *
+ * Requires either RTNL or RCU protection
+ */
const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy)
{
return rcu_dereference_rtnl(wiphy->regd);
@@ -2571,9 +2576,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
if (IS_ERR(new_regd))
return;
+ rtnl_lock();
+
tmp = get_wiphy_regdom(wiphy);
rcu_assign_pointer(wiphy->regd, new_regd);
rcu_free_regdom(tmp);
+
+ rtnl_unlock();
}
EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index ac4a317038f1..4a83117507f5 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -108,9 +108,9 @@ EXPORT_SYMBOL(xsk_get_pool_from_qid);
void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
- if (queue_id < dev->real_num_rx_queues)
+ if (queue_id < dev->num_rx_queues)
dev->_rx[queue_id].pool = NULL;
- if (queue_id < dev->real_num_tx_queues)
+ if (queue_id < dev->num_tx_queues)
dev->_tx[queue_id].pool = NULL;
}
@@ -423,9 +423,9 @@ static void xsk_destruct_skb(struct sk_buff *skb)
struct xdp_sock *xs = xdp_sk(skb->sk);
unsigned long flags;
- spin_lock_irqsave(&xs->tx_completion_lock, flags);
+ spin_lock_irqsave(&xs->pool->cq_lock, flags);
xskq_prod_submit_addr(xs->pool->cq, addr);
- spin_unlock_irqrestore(&xs->tx_completion_lock, flags);
+ spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
sock_wfree(skb);
}
@@ -437,6 +437,7 @@ static int xsk_generic_xmit(struct sock *sk)
bool sent_frame = false;
struct xdp_desc desc;
struct sk_buff *skb;
+ unsigned long flags;
int err = 0;
mutex_lock(&xs->mutex);
@@ -468,10 +469,13 @@ static int xsk_generic_xmit(struct sock *sk)
* if there is space in it. This avoids having to implement
* any buffering in the Tx path.
*/
+ spin_lock_irqsave(&xs->pool->cq_lock, flags);
if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
+ spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
kfree_skb(skb);
goto out;
}
+ spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
skb->dev = xs->dev;
skb->priority = sk->sk_priority;
@@ -483,6 +487,9 @@ static int xsk_generic_xmit(struct sock *sk)
if (err == NETDEV_TX_BUSY) {
/* Tell user-space to retry the send */
skb->destructor = sock_wfree;
+ spin_lock_irqsave(&xs->pool->cq_lock, flags);
+ xskq_prod_cancel(xs->pool->cq);
+ spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
/* Free skb without triggering the perf drop trace */
consume_skb(skb);
err = -EAGAIN;
@@ -878,6 +885,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
}
}
+ /* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
+ xs->fq_tmp = NULL;
+ xs->cq_tmp = NULL;
+
xs->dev = dev;
xs->zc = xs->umem->zc;
xs->queue_id = qid;
@@ -1299,7 +1310,6 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
xs->state = XSK_READY;
mutex_init(&xs->mutex);
spin_lock_init(&xs->rx_lock);
- spin_lock_init(&xs->tx_completion_lock);
INIT_LIST_HEAD(&xs->map_list);
spin_lock_init(&xs->map_list_lock);
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 67a4494d63b6..20598eea658c 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -71,12 +71,11 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
INIT_LIST_HEAD(&pool->free_list);
INIT_LIST_HEAD(&pool->xsk_tx_list);
spin_lock_init(&pool->xsk_tx_list_lock);
+ spin_lock_init(&pool->cq_lock);
refcount_set(&pool->users, 1);
pool->fq = xs->fq_tmp;
pool->cq = xs->cq_tmp;
- xs->fq_tmp = NULL;
- xs->cq_tmp = NULL;
for (i = 0; i < pool->free_heads_cnt; i++) {
xskb = &pool->heads[i];
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 4a9663aa7afe..2823b7c3302d 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -334,6 +334,11 @@ static inline bool xskq_prod_is_full(struct xsk_queue *q)
return xskq_prod_nb_free(q, 1) ? false : true;
}
+static inline void xskq_prod_cancel(struct xsk_queue *q)
+{
+ q->cached_prod--;
+}
+
static inline int xskq_prod_reserve(struct xsk_queue *q)
{
if (xskq_prod_is_full(q))
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 00085308ed9d..92e888ed939f 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -6646,6 +6646,12 @@ sub process {
# }
# }
+# strlcpy uses that should likely be strscpy
+ if ($line =~ /\bstrlcpy\s*\(/) {
+ WARN("STRLCPY",
+ "Prefer strscpy over strlcpy - see: https://lore.kernel.org/r/CAHk-=wgfRnXz0W3D37d01q3JFkr_i_uTL=V6A6G1oUZcprmknw\@mail.gmail.com/\n" . $herecurr);
+ }
+
# typecasts on min/max could be min_t/max_t
if ($perl_version_ok &&
defined $stat &&
diff --git a/scripts/config b/scripts/config
index 8c8d7c3d7acc..ff88e2faefd3 100755
--- a/scripts/config
+++ b/scripts/config
@@ -223,6 +223,7 @@ while [ "$1" != "" ] ; do
;;
*)
+ echo "bad command: $CMD" >&2
usage
;;
esac
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
index e083bcae343f..3643b4f896ed 100755
--- a/scripts/depmod.sh
+++ b/scripts/depmod.sh
@@ -15,6 +15,8 @@ if ! test -r System.map ; then
exit 0
fi
+# legacy behavior: "depmod" in /sbin, no /sbin in PATH
+PATH="$PATH:/sbin"
if [ -z $(command -v $DEPMOD) ]; then
echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
echo "This is probably in the kmod package." >&2
diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile
index d66949bfeba4..b5487cce69e8 100644
--- a/scripts/gcc-plugins/Makefile
+++ b/scripts/gcc-plugins/Makefile
@@ -22,9 +22,9 @@ always-y += $(GCC_PLUGIN)
GCC_PLUGINS_DIR = $(shell $(CC) -print-file-name=plugin)
plugin_cxxflags = -Wp,-MMD,$(depfile) $(KBUILD_HOSTCXXFLAGS) -fPIC \
- -I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++98 \
+ -I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++11 \
-fno-rtti -fno-exceptions -fasynchronous-unwind-tables \
- -ggdb -Wno-narrowing -Wno-unused-variable -Wno-c++11-compat \
+ -ggdb -Wno-narrowing -Wno-unused-variable \
-Wno-format-diag
plugin_ldflags = -shared
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index e46df0a2d4f9..2c40e68853dd 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -94,16 +94,6 @@ configfiles=$(wildcard $(srctree)/kernel/configs/$@ $(srctree)/arch/$(SRCARCH)/c
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m .config $(configfiles)
$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
-PHONY += kvmconfig
-kvmconfig: kvm_guest.config
- @echo >&2 "WARNING: 'make $@' will be removed after Linux 5.10"
- @echo >&2 " Please use 'make $<' instead."
-
-PHONY += xenconfig
-xenconfig: xen.config
- @echo >&2 "WARNING: 'make $@' will be removed after Linux 5.10"
- @echo >&2 " Please use 'make $<' instead."
-
PHONY += tinyconfig
tinyconfig:
$(Q)$(MAKE) -f $(srctree)/Makefile allnoconfig tiny.config
diff --git a/scripts/kconfig/mconf-cfg.sh b/scripts/kconfig/mconf-cfg.sh
index aa68ec95620d..fcd4acd4e9cb 100755
--- a/scripts/kconfig/mconf-cfg.sh
+++ b/scripts/kconfig/mconf-cfg.sh
@@ -33,7 +33,9 @@ if [ -f /usr/include/ncurses/ncurses.h ]; then
exit 0
fi
-if [ -f /usr/include/ncurses.h ]; then
+# As a final fallback before giving up, check if $HOSTCC knows of a default
+# ncurses installation (e.g. from a vendor-specific sysroot).
+if echo '#include <ncurses.h>' | "${HOSTCC}" -E - >/dev/null 2>&1; then
echo cflags=\"-D_GNU_SOURCE\"
echo libs=\"-lncurses\"
exit 0
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 7d8026f3f377..a0cd28cd31a8 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -275,7 +275,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
struct inode *inode;
audit_log_format(ab, " name=");
+ spin_lock(&a->u.dentry->d_lock);
audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
+ spin_unlock(&a->u.dentry->d_lock);
inode = d_backing_inode(a->u.dentry);
if (inode) {
@@ -293,8 +295,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
dentry = d_find_alias(inode);
if (dentry) {
audit_log_format(ab, " name=");
- audit_log_untrustedstring(ab,
- dentry->d_name.name);
+ spin_lock(&dentry->d_lock);
+ audit_log_untrustedstring(ab, dentry->d_name.name);
+ spin_unlock(&dentry->d_lock);
dput(dentry);
}
audit_log_format(ab, " dev=");
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index 11554d0412f0..1b8409ec2c97 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -611,7 +611,8 @@ snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_in
if (info->is_midi) {
struct midi_info minf;
- snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
+ if (snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf))
+ return -ENXIO;
inf->synth_type = SYNTH_TYPE_MIDI;
inf->synth_subtype = 0;
inf->nr_voices = 16;
diff --git a/sound/firewire/fireface/ff-transaction.c b/sound/firewire/fireface/ff-transaction.c
index 7f82762ccc8c..ee7122c461d4 100644
--- a/sound/firewire/fireface/ff-transaction.c
+++ b/sound/firewire/fireface/ff-transaction.c
@@ -88,7 +88,7 @@ static void transmit_midi_msg(struct snd_ff *ff, unsigned int port)
/* Set interval to next transaction. */
ff->next_ktime[port] = ktime_add_ns(ktime_get(),
- ff->rx_bytes[port] * 8 * NSEC_PER_SEC / 31250);
+ ff->rx_bytes[port] * 8 * (NSEC_PER_SEC / 31250));
if (quad_count == 1)
tcode = TCODE_WRITE_QUADLET_REQUEST;
diff --git a/sound/firewire/tascam/tascam-transaction.c b/sound/firewire/tascam/tascam-transaction.c
index 90288b4b4637..a073cece4a7d 100644
--- a/sound/firewire/tascam/tascam-transaction.c
+++ b/sound/firewire/tascam/tascam-transaction.c
@@ -209,7 +209,7 @@ static void midi_port_work(struct work_struct *work)
/* Set interval to next transaction. */
port->next_ktime = ktime_add_ns(ktime_get(),
- port->consume_bytes * 8 * NSEC_PER_SEC / 31250);
+ port->consume_bytes * 8 * (NSEC_PER_SEC / 31250));
/* Start this transaction. */
port->idling = false;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 687216e74526..eec1775dfffe 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2934,7 +2934,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)
snd_hdac_leave_pm(&codec->core);
}
-static int hda_codec_suspend(struct device *dev)
+static int hda_codec_runtime_suspend(struct device *dev)
{
struct hda_codec *codec = dev_to_hda_codec(dev);
unsigned int state;
@@ -2953,7 +2953,7 @@ static int hda_codec_suspend(struct device *dev)
return 0;
}
-static int hda_codec_resume(struct device *dev)
+static int hda_codec_runtime_resume(struct device *dev)
{
struct hda_codec *codec = dev_to_hda_codec(dev);
@@ -2968,16 +2968,6 @@ static int hda_codec_resume(struct device *dev)
return 0;
}
-static int hda_codec_runtime_suspend(struct device *dev)
-{
- return hda_codec_suspend(dev);
-}
-
-static int hda_codec_runtime_resume(struct device *dev)
-{
- return hda_codec_resume(dev);
-}
-
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
@@ -2998,31 +2988,31 @@ static void hda_codec_pm_complete(struct device *dev)
static int hda_codec_pm_suspend(struct device *dev)
{
dev->power.power_state = PMSG_SUSPEND;
- return hda_codec_suspend(dev);
+ return pm_runtime_force_suspend(dev);
}
static int hda_codec_pm_resume(struct device *dev)
{
dev->power.power_state = PMSG_RESUME;
- return hda_codec_resume(dev);
+ return pm_runtime_force_resume(dev);
}
static int hda_codec_pm_freeze(struct device *dev)
{
dev->power.power_state = PMSG_FREEZE;
- return hda_codec_suspend(dev);
+ return pm_runtime_force_suspend(dev);
}
static int hda_codec_pm_thaw(struct device *dev)
{
dev->power.power_state = PMSG_THAW;
- return hda_codec_resume(dev);
+ return pm_runtime_force_resume(dev);
}
static int hda_codec_pm_restore(struct device *dev)
{
dev->power.power_state = PMSG_RESTORE;
- return hda_codec_resume(dev);
+ return pm_runtime_force_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 6852668f1bcb..5a50d3a46445 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2220,8 +2220,6 @@ static const struct snd_pci_quirk power_save_denylist[] = {
SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
- /* https://bugzilla.redhat.com/show_bug.cgi?id=1581607 */
- SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
SND_PCI_QUIRK(0x1558, 0x6504, "Clevo W65_67SB", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
@@ -2486,6 +2484,9 @@ static const struct pci_device_id azx_ids[] = {
/* CometLake-S */
{ PCI_DEVICE(0x8086, 0xa3f0),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* CometLake-R */
+ { PCI_DEVICE(0x8086, 0xf0c8),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Icelake */
{ PCI_DEVICE(0x8086, 0x34c8),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
@@ -2509,6 +2510,9 @@ static const struct pci_device_id azx_ids[] = {
/* Alderlake-S */
{ PCI_DEVICE(0x8086, 0x7ad0),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* Alderlake-P */
+ { PCI_DEVICE(0x8086, 0x51c8),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Elkhart Lake */
{ PCI_DEVICE(0x8086, 0x4b55),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
@@ -2600,7 +2604,8 @@ static const struct pci_device_id azx_ids[] = {
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
/* ATI HDMI */
{ PCI_DEVICE(0x1002, 0x0002),
- .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+ AZX_DCAPS_PM_RUNTIME },
{ PCI_DEVICE(0x1002, 0x1308),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0x157a),
@@ -2662,9 +2667,11 @@ static const struct pci_device_id azx_ids[] = {
{ PCI_DEVICE(0x1002, 0xaab0),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0xaac0),
- .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+ AZX_DCAPS_PM_RUNTIME },
{ PCI_DEVICE(0x1002, 0xaac8),
- .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+ AZX_DCAPS_PM_RUNTIME },
{ PCI_DEVICE(0x1002, 0xaad8),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
AZX_DCAPS_PM_RUNTIME },
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 70164d1428d4..361cf2041911 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -388,7 +388,7 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
* in powers of 2, next available ratio is 16 which can be
* used as a limiting factor here.
*/
- if (of_device_is_compatible(np, "nvidia,tegra194-hda"))
+ if (of_device_is_compatible(np, "nvidia,tegra30-hda"))
chip->bus.core.sdo_limit = 16;
/* codec detection */
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index be5000dd1585..d49cc4409d59 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -1070,6 +1070,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
static const struct hda_device_id snd_hda_id_conexant[] = {
HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
+ HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 1e4a4b83fbf6..97adff0cbcab 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1733,7 +1733,7 @@ static void silent_stream_disable(struct hda_codec *codec,
per_pin->silent_stream = false;
unlock_out:
- mutex_unlock(&spec->pcm_lock);
+ mutex_unlock(&per_pin->lock);
}
/* update ELD and jack state via audio component */
@@ -4346,6 +4346,7 @@ HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI", patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index dde5ba209541..ed5b6b894dc1 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6289,6 +6289,7 @@ enum {
ALC221_FIXUP_HP_FRONT_MIC,
ALC292_FIXUP_TPT460,
ALC298_FIXUP_SPK_VOLUME,
+ ALC298_FIXUP_LENOVO_SPK_VOLUME,
ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
ALC269_FIXUP_ATIV_BOOK_8,
ALC221_FIXUP_HP_MIC_NO_PRESENCE,
@@ -6370,6 +6371,7 @@ enum {
ALC256_FIXUP_HP_HEADSET_MIC,
ALC236_FIXUP_DELL_AIO_HEADSET_MIC,
ALC282_FIXUP_ACER_DISABLE_LINEOUT,
+ ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -7119,6 +7121,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
},
+ [ALC298_FIXUP_LENOVO_SPK_VOLUME] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc298_fixup_speaker_volume,
+ },
[ALC295_FIXUP_DISABLE_DAC3] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc295_fixup_disable_dac3,
@@ -7803,6 +7809,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE
},
+ [ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_limit_int_mic_boost,
+ .chained = true,
+ .chain_id = ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7821,6 +7833,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0x1025, 0x1094, "Acer Aspire E5-575T", ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1166, "Acer Veriton N4640G", ALC269_FIXUP_LIFEBOOK),
@@ -7885,7 +7898,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
- SND_PCI_QUIRK(0x1028, 0x0a58, "Dell Precision 3650 Tower", ALC255_FIXUP_DELL_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -7959,11 +7972,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x8780, "HP ZBook Fury 17 G7 Mobile Workstation",
+ ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x8783, "HP ZBook Fury 15 G7 Mobile Workstation",
+ ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
@@ -8021,6 +8040,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
@@ -8126,6 +8147,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 7ef8f3105cdb..834367dd54e1 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -113,6 +113,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec)
spec->codec_type = VT1708S;
spec->gen.indep_hp = 1;
spec->gen.keep_eapd_on = 1;
+ spec->gen.dac_min_mute = 1;
spec->gen.pcm_playback_hook = via_playback_pcm_hook;
spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
codec->power_save_node = 1;
@@ -1002,6 +1003,7 @@ static const struct hda_verb vt1802_init_verbs[] = {
enum {
VIA_FIXUP_INTMIC_BOOST,
VIA_FIXUP_ASUS_G75,
+ VIA_FIXUP_POWER_SAVE,
};
static void via_fixup_intmic_boost(struct hda_codec *codec,
@@ -1011,6 +1013,13 @@ static void via_fixup_intmic_boost(struct hda_codec *codec,
override_mic_boost(codec, 0x30, 0, 2, 40);
}
+static void via_fixup_power_save(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
+ codec->power_save_node = 0;
+}
+
static const struct hda_fixup via_fixups[] = {
[VIA_FIXUP_INTMIC_BOOST] = {
.type = HDA_FIXUP_FUNC,
@@ -1025,11 +1034,16 @@ static const struct hda_fixup via_fixups[] = {
{ }
}
},
+ [VIA_FIXUP_POWER_SAVE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = via_fixup_power_save,
+ },
};
static const struct snd_pci_quirk vt2002p_fixups[] = {
SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
+ SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", VIA_FIXUP_POWER_SAVE),
{}
};
diff --git a/sound/soc/amd/raven/pci-acp3x.c b/sound/soc/amd/raven/pci-acp3x.c
index 8c138e490f0c..d3536fd6a124 100644
--- a/sound/soc/amd/raven/pci-acp3x.c
+++ b/sound/soc/amd/raven/pci-acp3x.c
@@ -140,21 +140,14 @@ static int snd_acp3x_probe(struct pci_dev *pci,
goto release_regions;
}
- /* check for msi interrupt support */
- ret = pci_enable_msi(pci);
- if (ret)
- /* msi is not enabled */
- irqflags = IRQF_SHARED;
- else
- /* msi is enabled */
- irqflags = 0;
+ irqflags = IRQF_SHARED;
addr = pci_resource_start(pci, 0);
adata->acp3x_base = devm_ioremap(&pci->dev, addr,
pci_resource_len(pci, 0));
if (!adata->acp3x_base) {
ret = -ENOMEM;
- goto disable_msi;
+ goto release_regions;
}
pci_set_master(pci);
pci_set_drvdata(pci, adata);
@@ -162,7 +155,7 @@ static int snd_acp3x_probe(struct pci_dev *pci,
adata->pme_en = rv_readl(adata->acp3x_base + mmACP_PME_EN);
ret = acp3x_init(adata);
if (ret)
- goto disable_msi;
+ goto release_regions;
val = rv_readl(adata->acp3x_base + mmACP_I2S_PIN_CONFIG);
switch (val) {
@@ -251,8 +244,6 @@ unregister_devs:
de_init:
if (acp3x_deinit(adata->acp3x_base))
dev_err(&pci->dev, "ACP de-init failed\n");
-disable_msi:
- pci_disable_msi(pci);
release_regions:
pci_release_regions(pci);
disable_pci:
@@ -311,7 +302,6 @@ static void snd_acp3x_remove(struct pci_dev *pci)
dev_err(&pci->dev, "ACP de-init failed\n");
pm_runtime_forbid(&pci->dev);
pm_runtime_get_noresume(&pci->dev);
- pci_disable_msi(pci);
pci_release_regions(pci);
pci_disable_device(pci);
}
diff --git a/sound/soc/amd/renoir/rn-pci-acp3x.c b/sound/soc/amd/renoir/rn-pci-acp3x.c
index fa169bf09886..deca8c7a0e87 100644
--- a/sound/soc/amd/renoir/rn-pci-acp3x.c
+++ b/sound/soc/amd/renoir/rn-pci-acp3x.c
@@ -171,6 +171,20 @@ static const struct dmi_system_id rn_acp_quirk_table[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "LNVNB161216"),
}
},
+ {
+ /* Lenovo ThinkPad E14 Gen 2 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "20T6CTO1WW"),
+ }
+ },
+ {
+ /* Lenovo ThinkPad X395 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "20NLCTO1WW"),
+ }
+ },
{}
};
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index 142373ec411a..9fe9471f4514 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -143,7 +143,7 @@ config SND_MCHP_SOC_SPDIFTX
- sama7g5
This S/PDIF TX driver is compliant with IEC-60958 standard and
- includes programable User Data and Channel Status fields.
+ includes programmable User Data and Channel Status fields.
config SND_MCHP_SOC_SPDIFRX
tristate "Microchip ASoC driver for boards using S/PDIF RX"
@@ -157,5 +157,5 @@ config SND_MCHP_SOC_SPDIFRX
- sama7g5
This S/PDIF RX driver is compliant with IEC-60958 standard and
- includes programable User Data and Channel Status fields.
+ includes programmable User Data and Channel Status fields.
endif
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index ba4eb54aafcb..9bf6bfdaf11e 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -457,7 +457,7 @@ config SND_SOC_ADAU7118_HW
help
Enable support for the Analog Devices ADAU7118 8 Channel PDM-to-I2S/TDM
Converter. In this mode, the device works in standalone mode which
- means that there is no bus to comunicate with it. Stereo mode is not
+ means that there is no bus to communicate with it. Stereo mode is not
supported in this mode.
To compile this driver as a module, choose M here: the module
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index d5fcc4db8284..0f3ac22f2cf8 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -717,7 +717,7 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
void *data)
{
struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
- int ret = -EOPNOTSUPP;
+ int ret = -ENOTSUPP;
if (hcp->hcd.ops->hook_plugged_cb) {
hcp->jack = jack;
diff --git a/sound/soc/codecs/max98373-i2c.c b/sound/soc/codecs/max98373-i2c.c
index 92921e34f948..85f6865019d4 100644
--- a/sound/soc/codecs/max98373-i2c.c
+++ b/sound/soc/codecs/max98373-i2c.c
@@ -19,6 +19,12 @@
#include <sound/tlv.h>
#include "max98373.h"
+static const u32 max98373_i2c_cache_reg[] = {
+ MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK,
+ MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK,
+ MAX98373_R20B6_BDE_CUR_STATE_READBACK,
+};
+
static struct reg_default max98373_reg[] = {
{MAX98373_R2000_SW_RESET, 0x00},
{MAX98373_R2001_INT_RAW1, 0x00},
@@ -472,6 +478,11 @@ static struct snd_soc_dai_driver max98373_dai[] = {
static int max98373_suspend(struct device *dev)
{
struct max98373_priv *max98373 = dev_get_drvdata(dev);
+ int i;
+
+ /* cache feedback register values before suspend */
+ for (i = 0; i < max98373->cache_num; i++)
+ regmap_read(max98373->regmap, max98373->cache[i].reg, &max98373->cache[i].val);
regcache_cache_only(max98373->regmap, true);
regcache_mark_dirty(max98373->regmap);
@@ -509,6 +520,7 @@ static int max98373_i2c_probe(struct i2c_client *i2c,
{
int ret = 0;
int reg = 0;
+ int i;
struct max98373_priv *max98373 = NULL;
max98373 = devm_kzalloc(&i2c->dev, sizeof(*max98373), GFP_KERNEL);
@@ -534,6 +546,14 @@ static int max98373_i2c_probe(struct i2c_client *i2c,
return ret;
}
+ max98373->cache_num = ARRAY_SIZE(max98373_i2c_cache_reg);
+ max98373->cache = devm_kcalloc(&i2c->dev, max98373->cache_num,
+ sizeof(*max98373->cache),
+ GFP_KERNEL);
+
+ for (i = 0; i < max98373->cache_num; i++)
+ max98373->cache[i].reg = max98373_i2c_cache_reg[i];
+
/* voltage/current slot & gpio configuration */
max98373_slot_config(&i2c->dev, max98373);
diff --git a/sound/soc/codecs/max98373-sdw.c b/sound/soc/codecs/max98373-sdw.c
index ec2e79c57357..b8d471d79e93 100644
--- a/sound/soc/codecs/max98373-sdw.c
+++ b/sound/soc/codecs/max98373-sdw.c
@@ -23,6 +23,12 @@ struct sdw_stream_data {
struct sdw_stream_runtime *sdw_stream;
};
+static const u32 max98373_sdw_cache_reg[] = {
+ MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK,
+ MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK,
+ MAX98373_R20B6_BDE_CUR_STATE_READBACK,
+};
+
static struct reg_default max98373_reg[] = {
{MAX98373_R0040_SCP_INIT_STAT_1, 0x00},
{MAX98373_R0041_SCP_INIT_MASK_1, 0x00},
@@ -245,6 +251,11 @@ static const struct regmap_config max98373_sdw_regmap = {
static __maybe_unused int max98373_suspend(struct device *dev)
{
struct max98373_priv *max98373 = dev_get_drvdata(dev);
+ int i;
+
+ /* cache feedback register values before suspend */
+ for (i = 0; i < max98373->cache_num; i++)
+ regmap_read(max98373->regmap, max98373->cache[i].reg, &max98373->cache[i].val);
regcache_cache_only(max98373->regmap, true);
@@ -757,6 +768,7 @@ static int max98373_init(struct sdw_slave *slave, struct regmap *regmap)
{
struct max98373_priv *max98373;
int ret;
+ int i;
struct device *dev = &slave->dev;
/* Allocate and assign private driver data structure */
@@ -768,6 +780,14 @@ static int max98373_init(struct sdw_slave *slave, struct regmap *regmap)
max98373->regmap = regmap;
max98373->slave = slave;
+ max98373->cache_num = ARRAY_SIZE(max98373_sdw_cache_reg);
+ max98373->cache = devm_kcalloc(dev, max98373->cache_num,
+ sizeof(*max98373->cache),
+ GFP_KERNEL);
+
+ for (i = 0; i < max98373->cache_num; i++)
+ max98373->cache[i].reg = max98373_sdw_cache_reg[i];
+
/* Read voltage and slot configuration */
max98373_slot_config(dev, max98373);
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
index 929bb1798c43..31d571d4fac1 100644
--- a/sound/soc/codecs/max98373.c
+++ b/sound/soc/codecs/max98373.c
@@ -168,6 +168,31 @@ static SOC_ENUM_SINGLE_DECL(max98373_adc_samplerate_enum,
MAX98373_R2051_MEAS_ADC_SAMPLING_RATE, 0,
max98373_ADC_samplerate_text);
+static int max98373_feedback_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct max98373_priv *max98373 = snd_soc_component_get_drvdata(component);
+ int i;
+
+ if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) {
+ /*
+ * Register values will be cached before suspend. The cached value
+ * will be a valid value and userspace will be happy with that.
+ */
+ for (i = 0; i < max98373->cache_num; i++) {
+ if (mc->reg == max98373->cache[i].reg) {
+ ucontrol->value.integer.value[0] = max98373->cache[i].val;
+ return 0;
+ }
+ }
+ }
+
+ return snd_soc_put_volsw(kcontrol, ucontrol);
+}
+
static const struct snd_kcontrol_new max98373_snd_controls[] = {
SOC_SINGLE("Digital Vol Sel Switch", MAX98373_R203F_AMP_DSP_CFG,
MAX98373_AMP_VOL_SEL_SHIFT, 1, 0),
@@ -209,8 +234,10 @@ SOC_SINGLE("ADC PVDD FLT Switch", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
MAX98373_FLT_EN_SHIFT, 1, 0),
SOC_SINGLE("ADC TEMP FLT Switch", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
MAX98373_FLT_EN_SHIFT, 1, 0),
-SOC_SINGLE("ADC PVDD", MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0, 0xFF, 0),
-SOC_SINGLE("ADC TEMP", MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0, 0xFF, 0),
+SOC_SINGLE_EXT("ADC PVDD", MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0, 0xFF, 0,
+ max98373_feedback_get, NULL),
+SOC_SINGLE_EXT("ADC TEMP", MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0, 0xFF, 0,
+ max98373_feedback_get, NULL),
SOC_SINGLE("ADC PVDD FLT Coeff", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
0, 0x3, 0),
SOC_SINGLE("ADC TEMP FLT Coeff", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
@@ -226,7 +253,8 @@ SOC_SINGLE("BDE LVL1 Thresh", MAX98373_R2097_BDE_L1_THRESH, 0, 0xFF, 0),
SOC_SINGLE("BDE LVL2 Thresh", MAX98373_R2098_BDE_L2_THRESH, 0, 0xFF, 0),
SOC_SINGLE("BDE LVL3 Thresh", MAX98373_R2099_BDE_L3_THRESH, 0, 0xFF, 0),
SOC_SINGLE("BDE LVL4 Thresh", MAX98373_R209A_BDE_L4_THRESH, 0, 0xFF, 0),
-SOC_SINGLE("BDE Active Level", MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0, 8, 0),
+SOC_SINGLE_EXT("BDE Active Level", MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0, 8, 0,
+ max98373_feedback_get, NULL),
SOC_SINGLE("BDE Clip Mode Switch", MAX98373_R2092_BDE_CLIPPER_MODE, 0, 1, 0),
SOC_SINGLE("BDE Thresh Hysteresis", MAX98373_R209B_BDE_THRESH_HYST, 0, 0xFF, 0),
SOC_SINGLE("BDE Hold Time", MAX98373_R2090_BDE_LVL_HOLD, 0, 0xFF, 0),
diff --git a/sound/soc/codecs/max98373.h b/sound/soc/codecs/max98373.h
index 4ab29b9d51c7..71f5a5228f34 100644
--- a/sound/soc/codecs/max98373.h
+++ b/sound/soc/codecs/max98373.h
@@ -203,6 +203,11 @@
/* MAX98373_R2000_SW_RESET */
#define MAX98373_SOFT_RESET (0x1 << 0)
+struct max98373_cache {
+ u32 reg;
+ u32 val;
+};
+
struct max98373_priv {
struct regmap *regmap;
int reset_gpio;
@@ -212,6 +217,9 @@ struct max98373_priv {
bool interleave_mode;
unsigned int ch_size;
bool tdm_mode;
+ /* cache for reading a valid fake feedback value */
+ struct max98373_cache *cache;
+ int cache_num;
/* variables to support soundwire */
struct sdw_slave *slave;
bool hw_init;
diff --git a/sound/soc/codecs/rt711.c b/sound/soc/codecs/rt711.c
index 5771c02c3459..85f744184a60 100644
--- a/sound/soc/codecs/rt711.c
+++ b/sound/soc/codecs/rt711.c
@@ -462,6 +462,8 @@ static int rt711_set_amp_gain_put(struct snd_kcontrol *kcontrol,
unsigned int read_ll, read_rl;
int i;
+ mutex_lock(&rt711->calibrate_mutex);
+
/* Can't use update bit function, so read the original value first */
addr_h = mc->reg;
addr_l = mc->rreg;
@@ -547,6 +549,8 @@ static int rt711_set_amp_gain_put(struct snd_kcontrol *kcontrol,
if (dapm->bias_level <= SND_SOC_BIAS_STANDBY)
regmap_write(rt711->regmap,
RT711_SET_AUDIO_POWER_STATE, AC_PWRST_D3);
+
+ mutex_unlock(&rt711->calibrate_mutex);
return 0;
}
@@ -859,9 +863,11 @@ static int rt711_set_bias_level(struct snd_soc_component *component,
break;
case SND_SOC_BIAS_STANDBY:
+ mutex_lock(&rt711->calibrate_mutex);
regmap_write(rt711->regmap,
RT711_SET_AUDIO_POWER_STATE,
AC_PWRST_D3);
+ mutex_unlock(&rt711->calibrate_mutex);
break;
default:
diff --git a/sound/soc/fsl/imx-hdmi.c b/sound/soc/fsl/imx-hdmi.c
index 2c2a76a71940..dbbb7618351c 100644
--- a/sound/soc/fsl/imx-hdmi.c
+++ b/sound/soc/fsl/imx-hdmi.c
@@ -90,7 +90,7 @@ static int imx_hdmi_init(struct snd_soc_pcm_runtime *rtd)
}
ret = snd_soc_component_set_jack(component, &data->hdmi_jack, NULL);
- if (ret && ret != -EOPNOTSUPP) {
+ if (ret && ret != -ENOTSUPP) {
dev_err(card->dev, "Can't set HDMI Jack %d\n", ret);
return ret;
}
@@ -164,6 +164,7 @@ static int imx_hdmi_probe(struct platform_device *pdev)
if ((hdmi_out && hdmi_in) || (!hdmi_out && !hdmi_in)) {
dev_err(&pdev->dev, "Invalid HDMI DAI link\n");
+ ret = -EINVAL;
goto fail;
}
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index c55d1239e705..c763bfeb1f38 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -189,6 +189,7 @@ static struct platform_driver haswell_audio = {
.probe = haswell_audio_probe,
.driver = {
.name = "haswell-audio",
+ .pm = &snd_soc_pm_ops,
},
};
diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c
index fcd8dff27ae8..1275c149acc0 100644
--- a/sound/soc/intel/skylake/cnl-sst.c
+++ b/sound/soc/intel/skylake/cnl-sst.c
@@ -224,6 +224,7 @@ static int cnl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
"dsp boot timeout, status=%#x error=%#x\n",
sst_dsp_shim_read(ctx, CNL_ADSP_FW_STATUS),
sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE));
+ ret = -ETIMEDOUT;
goto err;
}
} else {
diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
index c8664ab80d45..87cac440b369 100644
--- a/sound/soc/meson/axg-tdm-interface.c
+++ b/sound/soc/meson/axg-tdm-interface.c
@@ -467,8 +467,20 @@ static int axg_tdm_iface_set_bias_level(struct snd_soc_component *component,
return ret;
}
+static const struct snd_soc_dapm_widget axg_tdm_iface_dapm_widgets[] = {
+ SND_SOC_DAPM_SIGGEN("Playback Signal"),
+};
+
+static const struct snd_soc_dapm_route axg_tdm_iface_dapm_routes[] = {
+ { "Loopback", NULL, "Playback Signal" },
+};
+
static const struct snd_soc_component_driver axg_tdm_iface_component_drv = {
- .set_bias_level = axg_tdm_iface_set_bias_level,
+ .dapm_widgets = axg_tdm_iface_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(axg_tdm_iface_dapm_widgets),
+ .dapm_routes = axg_tdm_iface_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(axg_tdm_iface_dapm_routes),
+ .set_bias_level = axg_tdm_iface_set_bias_level,
};
static const struct of_device_id axg_tdm_iface_of_match[] = {
diff --git a/sound/soc/meson/axg-tdmin.c b/sound/soc/meson/axg-tdmin.c
index 88ed95ae886b..b4faf9d5c1aa 100644
--- a/sound/soc/meson/axg-tdmin.c
+++ b/sound/soc/meson/axg-tdmin.c
@@ -228,15 +228,6 @@ static const struct axg_tdm_formatter_driver axg_tdmin_drv = {
.regmap_cfg = &axg_tdmin_regmap_cfg,
.ops = &axg_tdmin_ops,
.quirks = &(const struct axg_tdm_formatter_hw) {
- .skew_offset = 2,
- },
-};
-
-static const struct axg_tdm_formatter_driver g12a_tdmin_drv = {
- .component_drv = &axg_tdmin_component_drv,
- .regmap_cfg = &axg_tdmin_regmap_cfg,
- .ops = &axg_tdmin_ops,
- .quirks = &(const struct axg_tdm_formatter_hw) {
.skew_offset = 3,
},
};
@@ -247,10 +238,10 @@ static const struct of_device_id axg_tdmin_of_match[] = {
.data = &axg_tdmin_drv,
}, {
.compatible = "amlogic,g12a-tdmin",
- .data = &g12a_tdmin_drv,
+ .data = &axg_tdmin_drv,
}, {
.compatible = "amlogic,sm1-tdmin",
- .data = &g12a_tdmin_drv,
+ .data = &axg_tdmin_drv,
}, {}
};
MODULE_DEVICE_TABLE(of, axg_tdmin_of_match);
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index af684fd19ab9..c5e99c2d89c7 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -270,18 +270,6 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
unsigned int id = dai->driver->id;
int ret = -EINVAL;
- unsigned int val = 0;
-
- ret = regmap_read(drvdata->lpaif_map,
- LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id), &val);
- if (ret) {
- dev_err(dai->dev, "error reading from i2sctl reg: %d\n", ret);
- return ret;
- }
- if (val == LPAIF_I2SCTL_RESET_STATE) {
- dev_err(dai->dev, "error in i2sctl register state\n");
- return -ENOTRECOVERABLE;
- }
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
@@ -454,20 +442,16 @@ static bool lpass_cpu_regmap_volatile(struct device *dev, unsigned int reg)
struct lpass_variant *v = drvdata->variant;
int i;
- for (i = 0; i < v->i2s_ports; ++i)
- if (reg == LPAIF_I2SCTL_REG(v, i))
- return true;
for (i = 0; i < v->irq_ports; ++i)
if (reg == LPAIF_IRQSTAT_REG(v, i))
return true;
for (i = 0; i < v->rdma_channels; ++i)
- if (reg == LPAIF_RDMACURR_REG(v, i) || reg == LPAIF_RDMACTL_REG(v, i))
+ if (reg == LPAIF_RDMACURR_REG(v, i))
return true;
for (i = 0; i < v->wrdma_channels; ++i)
- if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start) ||
- reg == LPAIF_WRDMACTL_REG(v, i + v->wrdma_channel_start))
+ if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start))
return true;
return false;
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index 80b09dede5f9..d1c248590f3a 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -452,7 +452,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
unsigned int reg_irqclr = 0, val_irqclr = 0;
unsigned int reg_irqen = 0, val_irqen = 0, val_mask = 0;
unsigned int dai_id = cpu_dai->driver->id;
- unsigned int dma_ctrl_reg = 0;
ch = pcm_data->dma_ch;
if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -469,17 +468,7 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
id = pcm_data->dma_ch - v->wrdma_channel_start;
map = drvdata->lpaif_map;
}
- ret = regmap_read(map, LPAIF_DMACTL_REG(v, ch, dir, dai_id), &dma_ctrl_reg);
- if (ret) {
- dev_err(soc_runtime->dev, "error reading from rdmactl reg: %d\n", ret);
- return ret;
- }
- if (dma_ctrl_reg == LPAIF_DMACTL_RESET_STATE ||
- dma_ctrl_reg == LPAIF_DMACTL_RESET_STATE + 1) {
- dev_err(soc_runtime->dev, "error in rdmactl register state\n");
- return -ENOTRECOVERABLE;
- }
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
@@ -500,7 +489,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
"error writing to rdmactl reg: %d\n", ret);
return ret;
}
- map = drvdata->hdmiif_map;
reg_irqclr = LPASS_HDMITX_APP_IRQCLEAR_REG(v);
val_irqclr = (LPAIF_IRQ_ALL(ch) |
LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
@@ -519,7 +507,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
break;
case MI2S_PRIMARY:
case MI2S_SECONDARY:
- map = drvdata->lpaif_map;
reg_irqclr = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
val_irqclr = LPAIF_IRQ_ALL(ch);
@@ -563,7 +550,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
"error writing to rdmactl reg: %d\n", ret);
return ret;
}
- map = drvdata->hdmiif_map;
reg_irqen = LPASS_HDMITX_APP_IRQEN_REG(v);
val_mask = (LPAIF_IRQ_ALL(ch) |
LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
@@ -573,7 +559,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
break;
case MI2S_PRIMARY:
case MI2S_SECONDARY:
- map = drvdata->lpaif_map;
reg_irqen = LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
val_mask = LPAIF_IRQ_ALL(ch);
val_irqen = 0;
@@ -838,6 +823,39 @@ static void lpass_platform_pcm_free(struct snd_soc_component *component,
}
}
+static int lpass_platform_pcmops_suspend(struct snd_soc_component *component)
+{
+ struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ struct regmap *map;
+ unsigned int dai_id = component->id;
+
+ if (dai_id == LPASS_DP_RX)
+ map = drvdata->hdmiif_map;
+ else
+ map = drvdata->lpaif_map;
+
+ regcache_cache_only(map, true);
+ regcache_mark_dirty(map);
+
+ return 0;
+}
+
+static int lpass_platform_pcmops_resume(struct snd_soc_component *component)
+{
+ struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ struct regmap *map;
+ unsigned int dai_id = component->id;
+
+ if (dai_id == LPASS_DP_RX)
+ map = drvdata->hdmiif_map;
+ else
+ map = drvdata->lpaif_map;
+
+ regcache_cache_only(map, false);
+ return regcache_sync(map);
+}
+
+
static const struct snd_soc_component_driver lpass_component_driver = {
.name = DRV_NAME,
.open = lpass_platform_pcmops_open,
@@ -850,6 +868,8 @@ static const struct snd_soc_component_driver lpass_component_driver = {
.mmap = lpass_platform_pcmops_mmap,
.pcm_construct = lpass_platform_pcm_new,
.pcm_destruct = lpass_platform_pcm_free,
+ .suspend = lpass_platform_pcmops_suspend,
+ .resume = lpass_platform_pcmops_resume,
};
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index b9aacf3d3b29..abdfd9cf91e2 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -366,25 +366,27 @@ void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
struct device *dev = rsnd_priv_to_dev(priv);
struct clk *clk;
- int i, ret;
+ int i;
for_each_rsnd_clk(clk, adg, i) {
- ret = 0;
if (enable) {
- ret = clk_prepare_enable(clk);
+ int ret = clk_prepare_enable(clk);
/*
* We shouldn't use clk_get_rate() under
* atomic context. Let's keep it when
* rsnd_adg_clk_enable() was called
*/
- adg->clk_rate[i] = clk_get_rate(adg->clk[i]);
+ adg->clk_rate[i] = 0;
+ if (ret < 0)
+ dev_warn(dev, "can't use clk %d\n", i);
+ else
+ adg->clk_rate[i] = clk_get_rate(clk);
} else {
- clk_disable_unprepare(clk);
+ if (adg->clk_rate[i])
+ clk_disable_unprepare(clk);
+ adg->clk_rate[i] = 0;
}
-
- if (ret < 0)
- dev_warn(dev, "can't use clk %d\n", i);
}
}
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 9f0c86cbdcca..2b75d0139e47 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2486,6 +2486,7 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
enum snd_soc_dapm_direction dir;
list_del(&w->list);
+ list_del(&w->dirty);
/*
* remove source and sink paths associated to this widget.
* While removing the path, remove reference to it from both
diff --git a/sound/soc/sof/Kconfig b/sound/soc/sof/Kconfig
index 031dad5fc4c7..3e8b6c035ce3 100644
--- a/sound/soc/sof/Kconfig
+++ b/sound/soc/sof/Kconfig
@@ -122,7 +122,7 @@ config SND_SOC_SOF_DEBUG_XRUN_STOP
bool "SOF stop on XRUN"
help
This option forces PCMs to stop on any XRUN event. This is useful to
- preserve any trace data ond pipeline status prior to the XRUN.
+ preserve any trace data and pipeline status prior to the XRUN.
Say Y if you are debugging SOF FW pipeline XRUNs.
If unsure select "N".
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
index 6875fa570c2c..6744318de612 100644
--- a/sound/soc/sof/intel/hda-codec.c
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -63,16 +63,18 @@ static int hda_codec_load_module(struct hda_codec *codec)
}
/* enable controller wake up event for all codecs with jack connectors */
-void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev)
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable)
{
struct hda_bus *hbus = sof_to_hbus(sdev);
struct hdac_bus *bus = sof_to_bus(sdev);
struct hda_codec *codec;
unsigned int mask = 0;
- list_for_each_codec(codec, hbus)
- if (codec->jacktbl.used)
- mask |= BIT(codec->core.addr);
+ if (enable) {
+ list_for_each_codec(codec, hbus)
+ if (codec->jacktbl.used)
+ mask |= BIT(codec->core.addr);
+ }
snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask);
}
@@ -81,23 +83,18 @@ void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev)
void hda_codec_jack_check(struct snd_sof_dev *sdev)
{
struct hda_bus *hbus = sof_to_hbus(sdev);
- struct hdac_bus *bus = sof_to_bus(sdev);
struct hda_codec *codec;
- /* disable controller Wake Up event*/
- snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, 0);
-
list_for_each_codec(codec, hbus)
/*
* Wake up all jack-detecting codecs regardless of whether an event
* has been recorded in STATESTS
*/
if (codec->jacktbl.used)
- schedule_delayed_work(&codec->jackpoll_work,
- codec->jackpoll_interval);
+ pm_request_resume(&codec->core.dev);
}
#else
-void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev) {}
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable) {}
void hda_codec_jack_check(struct snd_sof_dev *sdev) {}
#endif /* CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC */
EXPORT_SYMBOL_NS(hda_codec_jack_wake_enable, SND_SOC_SOF_HDA_AUDIO_CODEC);
@@ -156,7 +153,8 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
if (!hdev->bus->audio_component) {
dev_dbg(sdev->dev,
"iDisp hw present but no driver\n");
- goto error;
+ ret = -ENOENT;
+ goto out;
}
hda_priv->need_display_power = true;
}
@@ -173,24 +171,23 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
* other return codes without modification
*/
if (ret == 0)
- goto error;
+ ret = -ENOENT;
}
- return ret;
-
-error:
- snd_hdac_ext_bus_device_exit(hdev);
- return -ENOENT;
-
+out:
+ if (ret < 0) {
+ snd_hdac_device_unregister(hdev);
+ put_device(&hdev->dev);
+ }
#else
hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
if (!hdev)
return -ENOMEM;
ret = snd_hdac_ext_bus_device_init(&hbus->core, address, hdev, HDA_DEV_ASOC);
+#endif
return ret;
-#endif
}
/* Codec initialization */
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
index 2b001151fe37..1c5e05b88a90 100644
--- a/sound/soc/sof/intel/hda-dsp.c
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -617,7 +617,7 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
if (runtime_suspend)
- hda_codec_jack_wake_enable(sdev);
+ hda_codec_jack_wake_enable(sdev, true);
/* power down all hda link */
snd_hdac_ext_bus_link_power_down_all(bus);
@@ -683,8 +683,11 @@ static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
/* check jack status */
- if (runtime_resume)
- hda_codec_jack_check(sdev);
+ if (runtime_resume) {
+ hda_codec_jack_wake_enable(sdev, false);
+ if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
+ hda_codec_jack_check(sdev);
+ }
/* turn off the links that were off before suspend */
list_for_each_entry(hlink, &bus->hlink_list, list) {
diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
index 9ec8ae0fd649..a3b6f3e9121c 100644
--- a/sound/soc/sof/intel/hda.h
+++ b/sound/soc/sof/intel/hda.h
@@ -650,7 +650,7 @@ void sof_hda_bus_init(struct hdac_bus *bus, struct device *dev);
*/
void hda_codec_probe_bus(struct snd_sof_dev *sdev,
bool hda_codec_use_common_hdmi);
-void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev);
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable);
void hda_codec_jack_check(struct snd_sof_dev *sdev);
#endif /* CONFIG_SND_SOC_SOF_HDA */
diff --git a/sound/usb/card.c b/sound/usb/card.c
index d731ca62d599..e08fbf8e3ee0 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -450,10 +450,8 @@ lookup_device_name(u32 id)
static void snd_usb_audio_free(struct snd_card *card)
{
struct snd_usb_audio *chip = card->private_data;
- struct snd_usb_endpoint *ep, *n;
- list_for_each_entry_safe(ep, n, &chip->ep_list, list)
- snd_usb_endpoint_free(ep);
+ snd_usb_endpoint_free_all(chip);
mutex_destroy(&chip->mutex);
if (!atomic_read(&chip->shutdown))
@@ -611,6 +609,7 @@ static int snd_usb_audio_create(struct usb_interface *intf,
chip->usb_id = usb_id;
INIT_LIST_HEAD(&chip->pcm_list);
INIT_LIST_HEAD(&chip->ep_list);
+ INIT_LIST_HEAD(&chip->iface_ref_list);
INIT_LIST_HEAD(&chip->midi_list);
INIT_LIST_HEAD(&chip->mixer_list);
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 6a027c349194..37091b117614 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -18,6 +18,7 @@ struct audioformat {
unsigned int frame_size; /* samples per frame for non-audio */
unsigned char iface; /* interface number */
unsigned char altsetting; /* corresponding alternate setting */
+ unsigned char ep_idx; /* endpoint array index */
unsigned char altset_idx; /* array index of alternate setting */
unsigned char attributes; /* corresponding attributes of cs endpoint */
unsigned char endpoint; /* endpoint */
@@ -42,6 +43,7 @@ struct audioformat {
};
struct snd_usb_substream;
+struct snd_usb_iface_ref;
struct snd_usb_endpoint;
struct snd_usb_power_domain;
@@ -58,6 +60,7 @@ struct snd_urb_ctx {
struct snd_usb_endpoint {
struct snd_usb_audio *chip;
+ struct snd_usb_iface_ref *iface_ref;
int opened; /* open refcount; protect with chip->mutex */
atomic_t running; /* running status */
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 31051f2be46d..dc68ed65e478 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -485,18 +485,9 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip,
const struct audioformat *fmt, int rate)
{
struct usb_device *dev = chip->dev;
- struct usb_host_interface *alts;
- unsigned int ep;
unsigned char data[3];
int err, crate;
- alts = snd_usb_get_host_interface(chip, fmt->iface, fmt->altsetting);
- if (!alts)
- return -EINVAL;
- if (get_iface_desc(alts)->bNumEndpoints < 1)
- return -EINVAL;
- ep = get_endpoint(alts, 0)->bEndpointAddress;
-
/* if endpoint doesn't have sampling rate control, bail out */
if (!(fmt->attributes & UAC_EP_CS_ATTR_SAMPLE_RATE))
return 0;
@@ -506,11 +497,11 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip,
data[2] = rate >> 16;
err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR,
USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_OUT,
- UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
- data, sizeof(data));
+ UAC_EP_CS_ATTR_SAMPLE_RATE << 8,
+ fmt->endpoint, data, sizeof(data));
if (err < 0) {
dev_err(&dev->dev, "%d:%d: cannot set freq %d to ep %#x\n",
- fmt->iface, fmt->altsetting, rate, ep);
+ fmt->iface, fmt->altsetting, rate, fmt->endpoint);
return err;
}
@@ -524,11 +515,11 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip,
err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR,
USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN,
- UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
- data, sizeof(data));
+ UAC_EP_CS_ATTR_SAMPLE_RATE << 8,
+ fmt->endpoint, data, sizeof(data));
if (err < 0) {
dev_err(&dev->dev, "%d:%d: cannot get freq at ep %#x\n",
- fmt->iface, fmt->altsetting, ep);
+ fmt->iface, fmt->altsetting, fmt->endpoint);
chip->sample_rate_read_error++;
return 0; /* some devices don't support reading */
}
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 162da7a50046..8e568823c992 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -24,6 +24,14 @@
#define EP_FLAG_RUNNING 1
#define EP_FLAG_STOPPING 2
+/* interface refcounting */
+struct snd_usb_iface_ref {
+ unsigned char iface;
+ bool need_setup;
+ int opened;
+ struct list_head list;
+};
+
/*
* snd_usb_endpoint is a model that abstracts everything related to an
* USB endpoint and its streaming.
@@ -489,6 +497,28 @@ exit_clear:
}
/*
+ * Find or create a refcount object for the given interface
+ *
+ * The objects are released altogether in snd_usb_endpoint_free_all()
+ */
+static struct snd_usb_iface_ref *
+iface_ref_find(struct snd_usb_audio *chip, int iface)
+{
+ struct snd_usb_iface_ref *ip;
+
+ list_for_each_entry(ip, &chip->iface_ref_list, list)
+ if (ip->iface == iface)
+ return ip;
+
+ ip = kzalloc(sizeof(*ip), GFP_KERNEL);
+ if (!ip)
+ return NULL;
+ ip->iface = iface;
+ list_add_tail(&ip->list, &chip->iface_ref_list);
+ return ip;
+}
+
+/*
* Get the existing endpoint object corresponding EP
* Returns NULL if not present.
*/
@@ -520,8 +550,8 @@ snd_usb_get_endpoint(struct snd_usb_audio *chip, int ep_num)
*
* Returns zero on success or a negative error code.
*
- * New endpoints will be added to chip->ep_list and must be freed by
- * calling snd_usb_endpoint_free().
+ * New endpoints will be added to chip->ep_list and freed by
+ * calling snd_usb_endpoint_free_all().
*
* For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
* bNumEndpoints > 1 beforehand.
@@ -653,11 +683,17 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
} else {
ep->iface = fp->iface;
ep->altsetting = fp->altsetting;
- ep->ep_idx = 0;
+ ep->ep_idx = fp->ep_idx;
}
usb_audio_dbg(chip, "Open EP 0x%x, iface=%d:%d, idx=%d\n",
ep_num, ep->iface, ep->altsetting, ep->ep_idx);
+ ep->iface_ref = iface_ref_find(chip, ep->iface);
+ if (!ep->iface_ref) {
+ ep = NULL;
+ goto unlock;
+ }
+
ep->cur_audiofmt = fp;
ep->cur_channels = fp->channels;
ep->cur_rate = params_rate(params);
@@ -681,6 +717,11 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
ep->implicit_fb_sync);
} else {
+ if (WARN_ON(!ep->iface_ref)) {
+ ep = NULL;
+ goto unlock;
+ }
+
if (!endpoint_compatible(ep, fp, params)) {
usb_audio_err(chip, "Incompatible EP setup for 0x%x\n",
ep_num);
@@ -692,6 +733,9 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
ep_num, ep->opened);
}
+ if (!ep->iface_ref->opened++)
+ ep->iface_ref->need_setup = true;
+
ep->opened++;
unlock:
@@ -760,12 +804,16 @@ void snd_usb_endpoint_close(struct snd_usb_audio *chip,
mutex_lock(&chip->mutex);
usb_audio_dbg(chip, "Closing EP 0x%x (count %d)\n",
ep->ep_num, ep->opened);
- if (!--ep->opened) {
+
+ if (!--ep->iface_ref->opened)
endpoint_set_interface(chip, ep, false);
+
+ if (!--ep->opened) {
ep->iface = 0;
ep->altsetting = 0;
ep->cur_audiofmt = NULL;
ep->cur_rate = 0;
+ ep->iface_ref = NULL;
usb_audio_dbg(chip, "EP 0x%x closed\n", ep->ep_num);
}
mutex_unlock(&chip->mutex);
@@ -775,6 +823,8 @@ void snd_usb_endpoint_close(struct snd_usb_audio *chip,
void snd_usb_endpoint_suspend(struct snd_usb_endpoint *ep)
{
ep->need_setup = true;
+ if (ep->iface_ref)
+ ep->iface_ref->need_setup = true;
}
/*
@@ -1195,11 +1245,22 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
int err = 0;
mutex_lock(&chip->mutex);
+ if (WARN_ON(!ep->iface_ref))
+ goto unlock;
if (!ep->need_setup)
goto unlock;
- /* No need to (re-)configure the sync EP belonging to the same altset */
- if (ep->ep_idx) {
+ /* If the interface has been already set up, just set EP parameters */
+ if (!ep->iface_ref->need_setup) {
+ /* sample rate setup of UAC1 is per endpoint, and we need
+ * to update it at each EP configuration
+ */
+ if (ep->cur_audiofmt->protocol == UAC_VERSION_1) {
+ err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt,
+ ep->cur_rate);
+ if (err < 0)
+ goto unlock;
+ }
err = snd_usb_endpoint_set_params(chip, ep);
if (err < 0)
goto unlock;
@@ -1242,6 +1303,8 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
goto unlock;
}
+ ep->iface_ref->need_setup = false;
+
done:
ep->need_setup = false;
err = 1;
@@ -1387,15 +1450,21 @@ void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
}
/**
- * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
+ * snd_usb_endpoint_free_all: Free the resources of an snd_usb_endpoint
+ * @card: The chip
*
- * @ep: the endpoint to free
- *
- * This free all resources of the given ep.
+ * This frees all endpoints and their resources
*/
-void snd_usb_endpoint_free(struct snd_usb_endpoint *ep)
+void snd_usb_endpoint_free_all(struct snd_usb_audio *chip)
{
- kfree(ep);
+ struct snd_usb_endpoint *ep, *en;
+ struct snd_usb_iface_ref *ip, *in;
+
+ list_for_each_entry_safe(ep, en, &chip->ep_list, list)
+ kfree(ep);
+
+ list_for_each_entry_safe(ip, in, &chip->iface_ref_list, list)
+ kfree(ip);
}
/*
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index 11e3bb839fd7..eea4ca49876d 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -42,7 +42,7 @@ void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
void snd_usb_endpoint_suspend(struct snd_usb_endpoint *ep);
int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
-void snd_usb_endpoint_free(struct snd_usb_endpoint *ep);
+void snd_usb_endpoint_free_all(struct snd_usb_audio *chip);
int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
diff --git a/sound/usb/implicit.c b/sound/usb/implicit.c
index eb3a4c433c3e..521cc846d9d9 100644
--- a/sound/usb/implicit.c
+++ b/sound/usb/implicit.c
@@ -58,8 +58,6 @@ static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
IMPLICIT_FB_FIXED_DEV(0x0499, 0x172f, 0x81, 2), /* Steinberg UR22C */
IMPLICIT_FB_FIXED_DEV(0x0d9a, 0x00df, 0x81, 2), /* RTX6001 */
IMPLICIT_FB_FIXED_DEV(0x22f0, 0x0006, 0x81, 3), /* Allen&Heath Qu-16 */
- IMPLICIT_FB_FIXED_DEV(0x2b73, 0x000a, 0x82, 0), /* Pioneer DJ DJM-900NXS2 */
- IMPLICIT_FB_FIXED_DEV(0x2b73, 0x0017, 0x82, 0), /* Pioneer DJ DJM-250MK2 */
IMPLICIT_FB_FIXED_DEV(0x1686, 0xf029, 0x82, 2), /* Zoom UAC-2 */
IMPLICIT_FB_FIXED_DEV(0x2466, 0x8003, 0x86, 2), /* Fractal Audio Axe-Fx II */
IMPLICIT_FB_FIXED_DEV(0x0499, 0x172a, 0x86, 2), /* Yamaha MODX */
@@ -74,10 +72,12 @@ static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
/* No quirk for playback but with capture quirk (see below) */
IMPLICIT_FB_SKIP_DEV(0x0582, 0x0130), /* BOSS BR-80 */
+ IMPLICIT_FB_SKIP_DEV(0x0582, 0x0171), /* BOSS RC-505 */
IMPLICIT_FB_SKIP_DEV(0x0582, 0x0189), /* BOSS GT-100v2 */
IMPLICIT_FB_SKIP_DEV(0x0582, 0x01d6), /* BOSS GT-1 */
IMPLICIT_FB_SKIP_DEV(0x0582, 0x01d8), /* BOSS Katana */
IMPLICIT_FB_SKIP_DEV(0x0582, 0x01e5), /* BOSS GT-001 */
+ IMPLICIT_FB_SKIP_DEV(0x0582, 0x0203), /* BOSS AD-10 */
{} /* terminator */
};
@@ -85,10 +85,12 @@ static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
/* Implicit feedback quirk table for capture: only FIXED type */
static const struct snd_usb_implicit_fb_match capture_implicit_fb_quirks[] = {
IMPLICIT_FB_FIXED_DEV(0x0582, 0x0130, 0x0d, 0x01), /* BOSS BR-80 */
+ IMPLICIT_FB_FIXED_DEV(0x0582, 0x0171, 0x0d, 0x01), /* BOSS RC-505 */
IMPLICIT_FB_FIXED_DEV(0x0582, 0x0189, 0x0d, 0x01), /* BOSS GT-100v2 */
IMPLICIT_FB_FIXED_DEV(0x0582, 0x01d6, 0x0d, 0x01), /* BOSS GT-1 */
IMPLICIT_FB_FIXED_DEV(0x0582, 0x01d8, 0x0d, 0x01), /* BOSS Katana */
IMPLICIT_FB_FIXED_DEV(0x0582, 0x01e5, 0x0d, 0x01), /* BOSS GT-001 */
+ IMPLICIT_FB_FIXED_DEV(0x0582, 0x0203, 0x0d, 0x01), /* BOSS AD-10 */
{} /* terminator */
};
@@ -96,7 +98,7 @@ static const struct snd_usb_implicit_fb_match capture_implicit_fb_quirks[] = {
/* set up sync EP information on the audioformat */
static int add_implicit_fb_sync_ep(struct snd_usb_audio *chip,
struct audioformat *fmt,
- int ep, int ifnum,
+ int ep, int ep_idx, int ifnum,
const struct usb_host_interface *alts)
{
struct usb_interface *iface;
@@ -111,7 +113,7 @@ static int add_implicit_fb_sync_ep(struct snd_usb_audio *chip,
fmt->sync_ep = ep;
fmt->sync_iface = ifnum;
fmt->sync_altsetting = alts->desc.bAlternateSetting;
- fmt->sync_ep_idx = 0;
+ fmt->sync_ep_idx = ep_idx;
fmt->implicit_fb = 1;
usb_audio_dbg(chip,
"%d:%d: added %s implicit_fb sync_ep %x, iface %d:%d\n",
@@ -143,7 +145,7 @@ static int add_generic_uac2_implicit_fb(struct snd_usb_audio *chip,
(epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
USB_ENDPOINT_USAGE_IMPLICIT_FB)
return 0;
- return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress,
+ return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 0,
ifnum, alts);
}
@@ -169,10 +171,33 @@ static int add_roland_implicit_fb(struct snd_usb_audio *chip,
(epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
USB_ENDPOINT_USAGE_IMPLICIT_FB)
return 0;
- return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress,
+ return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 0,
ifnum, alts);
}
+/* Playback and capture EPs on Pioneer devices share the same iface/altset,
+ * but they don't seem to work well with the implicit fb mode, hence we
+ * just return as if the sync were already set up.
+ */
+static int skip_pioneer_sync_ep(struct snd_usb_audio *chip,
+ struct audioformat *fmt,
+ struct usb_host_interface *alts)
+{
+ struct usb_endpoint_descriptor *epd;
+
+ if (alts->desc.bNumEndpoints != 2)
+ return 0;
+
+ epd = get_endpoint(alts, 1);
+ if (!usb_endpoint_is_isoc_in(epd) ||
+ (epd->bmAttributes & USB_ENDPOINT_SYNCTYPE) != USB_ENDPOINT_SYNC_ASYNC ||
+ ((epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
+ USB_ENDPOINT_USAGE_DATA &&
+ (epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
+ USB_ENDPOINT_USAGE_IMPLICIT_FB))
+ return 0;
+ return 1; /* don't handle with the implicit fb, just skip sync EP */
+}
static int __add_generic_implicit_fb(struct snd_usb_audio *chip,
struct audioformat *fmt,
@@ -193,7 +218,7 @@ static int __add_generic_implicit_fb(struct snd_usb_audio *chip,
if (!usb_endpoint_is_isoc_in(epd) ||
(epd->bmAttributes & USB_ENDPOINT_SYNCTYPE) != USB_ENDPOINT_SYNC_ASYNC)
return 0;
- return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress,
+ return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 0,
iface, alts);
}
@@ -246,7 +271,7 @@ static int audioformat_implicit_fb_quirk(struct snd_usb_audio *chip,
case IMPLICIT_FB_NONE:
return 0; /* No quirk */
case IMPLICIT_FB_FIXED:
- return add_implicit_fb_sync_ep(chip, fmt, p->ep_num,
+ return add_implicit_fb_sync_ep(chip, fmt, p->ep_num, 0,
p->iface, NULL);
}
}
@@ -274,6 +299,14 @@ static int audioformat_implicit_fb_quirk(struct snd_usb_audio *chip,
return 1;
}
+ /* Pioneer devices with vendor spec class */
+ if (attr == USB_ENDPOINT_SYNC_ASYNC &&
+ alts->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
+ USB_ID_VENDOR(chip->usb_id) == 0x2b73 /* Pioneer */) {
+ if (skip_pioneer_sync_ep(chip, fmt, alts))
+ return 1;
+ }
+
/* Try the generic implicit fb if available */
if (chip->generic_implicit_fb)
return add_generic_implicit_fb(chip, fmt, alts);
@@ -291,8 +324,8 @@ static int audioformat_capture_quirk(struct snd_usb_audio *chip,
p = find_implicit_fb_entry(chip, capture_implicit_fb_quirks, alts);
if (p && p->type == IMPLICIT_FB_FIXED)
- return add_implicit_fb_sync_ep(chip, fmt, p->ep_num, p->iface,
- NULL);
+ return add_implicit_fb_sync_ep(chip, fmt, p->ep_num, 0,
+ p->iface, NULL);
return 0;
}
@@ -374,20 +407,19 @@ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
int stream)
{
struct snd_usb_substream *subs;
- const struct audioformat *fp, *sync_fmt;
+ const struct audioformat *fp, *sync_fmt = NULL;
int score, high_score;
- /* When sharing the same altset, use the original audioformat */
+ /* Use the original audioformat as fallback for the shared altset */
if (target->iface == target->sync_iface &&
target->altsetting == target->sync_altsetting)
- return target;
+ sync_fmt = target;
subs = find_matching_substream(chip, stream, target->sync_ep,
target->fmt_type);
if (!subs)
- return NULL;
+ return sync_fmt;
- sync_fmt = NULL;
high_score = 0;
list_for_each_entry(fp, &subs->fmt_list, list) {
score = match_endpoint_audioformats(subs, fp,
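For readers tracing the descriptor checks in the implicit.c hunks above: implicit-feedback candidates are identified purely from the isochronous IN endpoint's bmAttributes bits. The following stand-alone helper is an illustrative sketch (not part of the patch) that decodes the same fields with the standard <linux/usb/ch9.h> macros the quirk code uses:

#include <linux/types.h>
#include <linux/usb/ch9.h>

/* Illustrative only: classify an endpoint the way the generic detection
 * above does -- isochronous IN, asynchronous sync type, and the
 * "implicit feedback data" usage bits set in bmAttributes.
 */
static bool looks_like_implicit_fb_ep(const struct usb_endpoint_descriptor *epd)
{
	if (!usb_endpoint_is_isoc_in(epd))
		return false;
	/* bits 3:2 of bmAttributes select the synchronisation type */
	if ((epd->bmAttributes & USB_ENDPOINT_SYNCTYPE) != USB_ENDPOINT_SYNC_ASYNC)
		return false;
	/* bits 5:4 select the usage type; 0x20 marks implicit feedback data EPs */
	return (epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) ==
	       USB_ENDPOINT_USAGE_IMPLICIT_FB;
}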
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index c8213652470c..0c23fa6d8525 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1889,6 +1889,8 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
ms_ep = find_usb_ms_endpoint_descriptor(hostep);
if (!ms_ep)
continue;
+ if (ms_ep->bNumEmbMIDIJack > 0x10)
+ continue;
if (usb_endpoint_dir_out(ep)) {
if (endpoints[epidx].out_ep) {
if (++epidx >= MIDI_MAX_ENDPOINTS) {
@@ -2141,6 +2143,8 @@ static int snd_usbmidi_detect_roland(struct snd_usb_midi *umidi,
cs_desc[1] == USB_DT_CS_INTERFACE &&
cs_desc[2] == 0xf1 &&
cs_desc[3] == 0x02) {
+ if (cs_desc[4] > 0x10 || cs_desc[5] > 0x10)
+ continue;
endpoint->in_cables = (1 << cs_desc[4]) - 1;
endpoint->out_cables = (1 << cs_desc[5]) - 1;
return snd_usbmidi_detect_endpoints(umidi, endpoint, 1);
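Why the new bounds checks in midi.c use 0x10: the cable masks assigned right below them are computed as (1 << n) - 1 and stored in 16-bit cable-mask fields, so a descriptor advertising more than 16 embedded MIDI jacks cannot be represented and is best skipped. A minimal illustrative sketch of the guarded computation (stand-alone, not the patch's code):

#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative: derive a cable bitmask from a descriptor-supplied jack
 * count.  With a 16-bit mask, anything above 16 (0x10) jacks is bogus
 * and gets rejected, mirroring the "> 0x10" skips above.
 */
static int cable_mask_from_jacks(unsigned int num_jacks, u16 *mask)
{
	if (num_jacks > 16)
		return -EINVAL;
	*mask = (u16)((1U << num_jacks) - 1);
	return 0;
}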
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 56079901769f..078bb4c94033 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -663,7 +663,7 @@ static int hw_check_valid_format(struct snd_usb_substream *subs,
check_fmts.bits[1] = (u32)(fp->formats >> 32);
snd_mask_intersect(&check_fmts, fmts);
if (snd_mask_empty(&check_fmts)) {
- hwc_debug(" > check: no supported format %d\n", fp->format);
+ hwc_debug(" > check: no supported format 0x%llx\n", fp->formats);
return 0;
}
/* check the channels */
@@ -775,24 +775,11 @@ static int hw_rule_channels(struct snd_pcm_hw_params *params,
return apply_hw_params_minmax(it, rmin, rmax);
}
-static int hw_rule_format(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
+static int apply_hw_params_format_bits(struct snd_mask *fmt, u64 fbits)
{
- struct snd_usb_substream *subs = rule->private;
- const struct audioformat *fp;
- struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
- u64 fbits;
u32 oldbits[2];
int changed;
- hwc_debug("hw_rule_format: %x:%x\n", fmt->bits[0], fmt->bits[1]);
- fbits = 0;
- list_for_each_entry(fp, &subs->fmt_list, list) {
- if (!hw_check_valid_format(subs, params, fp))
- continue;
- fbits |= fp->formats;
- }
-
oldbits[0] = fmt->bits[0];
oldbits[1] = fmt->bits[1];
fmt->bits[0] &= (u32)fbits;
@@ -806,6 +793,24 @@ static int hw_rule_format(struct snd_pcm_hw_params *params,
return changed;
}
+static int hw_rule_format(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_usb_substream *subs = rule->private;
+ const struct audioformat *fp;
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ u64 fbits;
+
+ hwc_debug("hw_rule_format: %x:%x\n", fmt->bits[0], fmt->bits[1]);
+ fbits = 0;
+ list_for_each_entry(fp, &subs->fmt_list, list) {
+ if (!hw_check_valid_format(subs, params, fp))
+ continue;
+ fbits |= fp->formats;
+ }
+ return apply_hw_params_format_bits(fmt, fbits);
+}
+
static int hw_rule_period_time(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
@@ -833,64 +838,92 @@ static int hw_rule_period_time(struct snd_pcm_hw_params *params,
return apply_hw_params_minmax(it, pmin, UINT_MAX);
}
-/* apply PCM hw constraints from the concurrent sync EP */
-static int apply_hw_constraint_from_sync(struct snd_pcm_runtime *runtime,
- struct snd_usb_substream *subs)
+/* get the EP or the sync EP for implicit fb when it's already set up */
+static const struct snd_usb_endpoint *
+get_sync_ep_from_substream(struct snd_usb_substream *subs)
{
struct snd_usb_audio *chip = subs->stream->chip;
- struct snd_usb_endpoint *ep;
const struct audioformat *fp;
- int err;
+ const struct snd_usb_endpoint *ep;
list_for_each_entry(fp, &subs->fmt_list, list) {
ep = snd_usb_get_endpoint(chip, fp->endpoint);
if (ep && ep->cur_rate)
- goto found;
+ return ep;
if (!fp->implicit_fb)
continue;
/* for the implicit fb, check the sync ep as well */
ep = snd_usb_get_endpoint(chip, fp->sync_ep);
if (ep && ep->cur_rate)
- goto found;
+ return ep;
}
- return 0;
+ return NULL;
+}
- found:
- if (!find_format(&subs->fmt_list, ep->cur_format, ep->cur_rate,
- ep->cur_channels, false, NULL)) {
- usb_audio_dbg(chip, "EP 0x%x being used, but not applicable\n",
- ep->ep_num);
+/* additional hw constraints for implicit feedback mode */
+static int hw_rule_format_implicit_fb(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_usb_substream *subs = rule->private;
+ const struct snd_usb_endpoint *ep;
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ ep = get_sync_ep_from_substream(subs);
+ if (!ep)
return 0;
- }
- usb_audio_dbg(chip, "EP 0x%x being used, using fixed params:\n",
- ep->ep_num);
- usb_audio_dbg(chip, "rate=%d, period_size=%d, periods=%d\n",
- ep->cur_rate, ep->cur_period_frames,
- ep->cur_buffer_periods);
+ hwc_debug("applying %s\n", __func__);
+ return apply_hw_params_format_bits(fmt, pcm_format_to_bits(ep->cur_format));
+}
- runtime->hw.formats = subs->formats;
- runtime->hw.rate_min = runtime->hw.rate_max = ep->cur_rate;
- runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
- runtime->hw.periods_min = runtime->hw.periods_max =
- ep->cur_buffer_periods;
+static int hw_rule_rate_implicit_fb(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_usb_substream *subs = rule->private;
+ const struct snd_usb_endpoint *ep;
+ struct snd_interval *it;
- err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- hw_rule_channels, subs,
- SNDRV_PCM_HW_PARAM_FORMAT,
- SNDRV_PCM_HW_PARAM_RATE,
- -1);
- if (err < 0)
- return err;
+ ep = get_sync_ep_from_substream(subs);
+ if (!ep)
+ return 0;
- err = snd_pcm_hw_constraint_minmax(runtime,
- SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
- ep->cur_period_frames,
- ep->cur_period_frames);
- if (err < 0)
- return err;
+ hwc_debug("applying %s\n", __func__);
+ it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ return apply_hw_params_minmax(it, ep->cur_rate, ep->cur_rate);
+}
- return 1; /* notify the finding */
+static int hw_rule_period_size_implicit_fb(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_usb_substream *subs = rule->private;
+ const struct snd_usb_endpoint *ep;
+ struct snd_interval *it;
+
+ ep = get_sync_ep_from_substream(subs);
+ if (!ep)
+ return 0;
+
+ hwc_debug("applying %s\n", __func__);
+ it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
+ return apply_hw_params_minmax(it, ep->cur_period_frames,
+ ep->cur_period_frames);
+}
+
+static int hw_rule_periods_implicit_fb(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_usb_substream *subs = rule->private;
+ const struct snd_usb_endpoint *ep;
+ struct snd_interval *it;
+
+ ep = get_sync_ep_from_substream(subs);
+ if (!ep)
+ return 0;
+
+ hwc_debug("applying %s\n", __func__);
+ it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIODS);
+ return apply_hw_params_minmax(it, ep->cur_buffer_periods,
+ ep->cur_buffer_periods);
}
/*
@@ -899,20 +932,11 @@ static int apply_hw_constraint_from_sync(struct snd_pcm_runtime *runtime,
static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substream *subs)
{
- struct snd_usb_audio *chip = subs->stream->chip;
const struct audioformat *fp;
unsigned int pt, ptmin;
int param_period_time_if_needed = -1;
int err;
- mutex_lock(&chip->mutex);
- err = apply_hw_constraint_from_sync(runtime, subs);
- mutex_unlock(&chip->mutex);
- if (err < 0)
- return err;
- if (err > 0) /* found the matching? */
- goto add_extra_rules;
-
runtime->hw.formats = subs->formats;
runtime->hw.rate_min = 0x7fffffff;
@@ -957,6 +981,7 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
hw_rule_rate, subs,
+ SNDRV_PCM_HW_PARAM_RATE,
SNDRV_PCM_HW_PARAM_FORMAT,
SNDRV_PCM_HW_PARAM_CHANNELS,
param_period_time_if_needed,
@@ -964,9 +989,9 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
if (err < 0)
return err;
-add_extra_rules:
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
hw_rule_channels, subs,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
SNDRV_PCM_HW_PARAM_FORMAT,
SNDRV_PCM_HW_PARAM_RATE,
param_period_time_if_needed,
@@ -975,6 +1000,7 @@ add_extra_rules:
return err;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
hw_rule_format, subs,
+ SNDRV_PCM_HW_PARAM_FORMAT,
SNDRV_PCM_HW_PARAM_RATE,
SNDRV_PCM_HW_PARAM_CHANNELS,
param_period_time_if_needed,
@@ -993,6 +1019,28 @@ add_extra_rules:
return err;
}
+ /* additional hw constraints for implicit fb */
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
+ hw_rule_format_implicit_fb, subs,
+ SNDRV_PCM_HW_PARAM_FORMAT, -1);
+ if (err < 0)
+ return err;
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ hw_rule_rate_implicit_fb, subs,
+ SNDRV_PCM_HW_PARAM_RATE, -1);
+ if (err < 0)
+ return err;
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ hw_rule_period_size_implicit_fb, subs,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
+ if (err < 0)
+ return err;
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
+ hw_rule_periods_implicit_fb, subs,
+ SNDRV_PCM_HW_PARAM_PERIODS, -1);
+ if (err < 0)
+ return err;
+
return 0;
}
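The four hw_rule_*_implicit_fb callbacks registered above replace the old apply_hw_constraint_from_sync(): rather than rewriting runtime->hw once at open time, each constraint is re-evaluated during hw_params refinement and, when the sync endpoint is already running, collapses its parameter (format, rate, period size, periods) to that endpoint's current value. The fragment below sketches the narrow-to-one-value contract that apply_hw_params_minmax() presumably follows; the struct is a stand-in, not ALSA's snd_interval:

#include <linux/errno.h>

/* Illustrative sketch: clamp an interval [min, max] to a single value
 * and report 1 if it changed, 0 if it was already there, or -EINVAL if
 * the interval became empty -- the return convention hw_params rules use.
 */
struct demo_interval {
	unsigned int min, max;
};

static int demo_refine_to_value(struct demo_interval *it, unsigned int val)
{
	int changed = 0;

	if (it->min < val) {
		it->min = val;
		changed = 1;
	}
	if (it->max > val) {
		it->max = val;
		changed = 1;
	}
	if (it->min > it->max)
		return -EINVAL;
	return changed;
}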
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 0e11cb96fa8c..c8a4bdf18207 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3362,6 +3362,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
.altsetting = 1,
.altset_idx = 1,
.endpoint = 0x86,
+ .ep_idx = 1,
.ep_attr = USB_ENDPOINT_XFER_ISOC|
USB_ENDPOINT_SYNC_ASYNC|
USB_ENDPOINT_USAGE_IMPLICIT_FB,
@@ -3450,6 +3451,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
.altsetting = 1,
.altset_idx = 1,
.endpoint = 0x82,
+ .ep_idx = 1,
.ep_attr = USB_ENDPOINT_XFER_ISOC|
USB_ENDPOINT_SYNC_ASYNC|
USB_ENDPOINT_USAGE_IMPLICIT_FB,
@@ -3506,6 +3508,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
.altsetting = 1,
.altset_idx = 1,
.endpoint = 0x82,
+ .ep_idx = 1,
.ep_attr = USB_ENDPOINT_XFER_ISOC|
USB_ENDPOINT_SYNC_ASYNC|
USB_ENDPOINT_USAGE_IMPLICIT_FB,
@@ -3562,6 +3565,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
.altsetting = 1,
.altset_idx = 1,
.endpoint = 0x82,
+ .ep_idx = 1,
.ep_attr = USB_ENDPOINT_XFER_ISOC|
USB_ENDPOINT_SYNC_ASYNC|
USB_ENDPOINT_USAGE_IMPLICIT_FB,
@@ -3619,6 +3623,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
.altsetting = 1,
.altset_idx = 1,
.endpoint = 0x82,
+ .ep_idx = 1,
.ep_attr = USB_ENDPOINT_XFER_ISOC|
USB_ENDPOINT_SYNC_ASYNC|
USB_ENDPOINT_USAGE_IMPLICIT_FB,
@@ -3679,6 +3684,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
.altsetting = 1,
.altset_idx = 1,
.endpoint = 0x82,
+ .ep_idx = 1,
.ep_attr = USB_ENDPOINT_XFER_ISOC|
USB_ENDPOINT_SYNC_ASYNC|
USB_ENDPOINT_USAGE_IMPLICIT_FB,
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index e4a690bb4c99..e196e364cef1 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -120,6 +120,40 @@ static int create_standard_audio_quirk(struct snd_usb_audio *chip,
return 0;
}
+/* create the audio stream and the corresponding endpoints from the fixed
+ * audioformat object; this is used for quirks with fixed EPs
+ */
+static int add_audio_stream_from_fixed_fmt(struct snd_usb_audio *chip,
+ struct audioformat *fp)
+{
+ int stream, err;
+
+ stream = (fp->endpoint & USB_DIR_IN) ?
+ SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+
+ snd_usb_audioformat_set_sync_ep(chip, fp);
+
+ err = snd_usb_add_audio_stream(chip, stream, fp);
+ if (err < 0)
+ return err;
+
+ err = snd_usb_add_endpoint(chip, fp->endpoint,
+ SND_USB_ENDPOINT_TYPE_DATA);
+ if (err < 0)
+ return err;
+
+ if (fp->sync_ep) {
+ err = snd_usb_add_endpoint(chip, fp->sync_ep,
+ fp->implicit_fb ?
+ SND_USB_ENDPOINT_TYPE_DATA :
+ SND_USB_ENDPOINT_TYPE_SYNC);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
/*
* create a stream for an endpoint/altsetting without proper descriptors
*/
@@ -131,8 +165,8 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
struct audioformat *fp;
struct usb_host_interface *alts;
struct usb_interface_descriptor *altsd;
- int stream, err;
unsigned *rate_table = NULL;
+ int err;
fp = kmemdup(quirk->data, sizeof(*fp), GFP_KERNEL);
if (!fp)
@@ -153,11 +187,6 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
fp->rate_table = rate_table;
}
- stream = (fp->endpoint & USB_DIR_IN)
- ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
- err = snd_usb_add_audio_stream(chip, stream, fp);
- if (err < 0)
- goto error;
if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
fp->altset_idx >= iface->num_altsetting) {
err = -EINVAL;
@@ -165,7 +194,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
}
alts = &iface->altsetting[fp->altset_idx];
altsd = get_iface_desc(alts);
- if (altsd->bNumEndpoints < 1) {
+ if (altsd->bNumEndpoints <= fp->ep_idx) {
err = -EINVAL;
goto error;
}
@@ -175,7 +204,14 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
if (fp->datainterval == 0)
fp->datainterval = snd_usb_parse_datainterval(chip, alts);
if (fp->maxpacksize == 0)
- fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
+ fp->maxpacksize = le16_to_cpu(get_endpoint(alts, fp->ep_idx)->wMaxPacketSize);
+ if (!fp->fmt_type)
+ fp->fmt_type = UAC_FORMAT_TYPE_I;
+
+ err = add_audio_stream_from_fixed_fmt(chip, fp);
+ if (err < 0)
+ goto error;
+
usb_set_interface(chip->dev, fp->iface, 0);
snd_usb_init_pitch(chip, fp);
snd_usb_init_sample_rate(chip, fp, fp->rate_max);
@@ -417,7 +453,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
struct usb_host_interface *alts;
struct usb_interface_descriptor *altsd;
struct audioformat *fp;
- int stream, err;
+ int err;
/* both PCM and MIDI interfaces have 2 or more altsettings */
if (iface->num_altsetting < 2)
@@ -482,9 +518,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
return -ENXIO;
}
- stream = (fp->endpoint & USB_DIR_IN)
- ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
- err = snd_usb_add_audio_stream(chip, stream, fp);
+ err = add_audio_stream_from_fixed_fmt(chip, fp);
if (err < 0) {
list_del(&fp->list); /* unlink for avoiding double-free */
kfree(fp);
@@ -1436,30 +1470,6 @@ static void set_format_emu_quirk(struct snd_usb_substream *subs,
subs->pkt_offset_adj = (emu_samplerate_id >= EMU_QUIRK_SR_176400HZ) ? 4 : 0;
}
-
-/*
- * Pioneer DJ DJM-900NXS2
- * Device needs to know the sample rate each time substream is started
- */
-static int pioneer_djm_set_format_quirk(struct snd_usb_substream *subs)
-{
- unsigned int cur_rate = subs->data_endpoint->cur_rate;
- /* Convert sample rate value to little endian */
- u8 sr[3];
-
- sr[0] = cur_rate & 0xff;
- sr[1] = (cur_rate >> 8) & 0xff;
- sr[2] = (cur_rate >> 16) & 0xff;
-
- /* Configure device */
- usb_set_interface(subs->dev, 0, 1);
- snd_usb_ctl_msg(subs->stream->chip->dev,
- usb_rcvctrlpipe(subs->stream->chip->dev, 0),
- 0x01, 0x22, 0x0100, 0x0082, &sr, 0x0003);
-
- return 0;
-}
-
void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
const struct audioformat *fmt)
{
@@ -1470,10 +1480,6 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
set_format_emu_quirk(subs, fmt);
break;
- case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
- case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
- pioneer_djm_set_format_quirk(subs);
- break;
case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
subs->stream_offset_adj = 2;
break;
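One detail from the create_fixed_stream_quirk() hunks worth spelling out: with ep_idx now allowed to be non-zero, the old "bNumEndpoints < 1" sanity check is no longer enough, so the code bails out when bNumEndpoints <= fp->ep_idx, before get_endpoint(alts, fp->ep_idx) would index past the descriptor array. An illustrative guard of the same shape (stand-alone, assumed helper name):

#include <linux/errno.h>
#include <linux/usb.h>

/* Illustrative: reject an endpoint index the altsetting does not
 * actually provide before indexing into its endpoint array.
 */
static int check_ep_index(const struct usb_host_interface *alts,
			  unsigned int ep_idx)
{
	if (ep_idx >= alts->desc.bNumEndpoints)
		return -EINVAL;	/* same condition as "bNumEndpoints <= ep_idx" */
	return 0;
}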
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 980287aadd36..215c1771dd57 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -44,6 +44,7 @@ struct snd_usb_audio {
struct list_head pcm_list; /* list of pcm streams */
struct list_head ep_list; /* list of audio-related endpoints */
+ struct list_head iface_ref_list; /* list of interface refcounts */
int pcm_devs;
struct list_head midi_list; /* list of midi interfaces */
diff --git a/tools/bootconfig/scripts/bconf2ftrace.sh b/tools/bootconfig/scripts/bconf2ftrace.sh
index 595e164dc352..feb30c2c7881 100755
--- a/tools/bootconfig/scripts/bconf2ftrace.sh
+++ b/tools/bootconfig/scripts/bconf2ftrace.sh
@@ -152,6 +152,7 @@ setup_instance() { # [instance]
set_array_of ${instance}.options ${instancedir}/trace_options
set_value_of ${instance}.trace_clock ${instancedir}/trace_clock
set_value_of ${instance}.cpumask ${instancedir}/tracing_cpumask
+ set_value_of ${instance}.tracing_on ${instancedir}/tracing_on
set_value_of ${instance}.tracer ${instancedir}/current_tracer
set_array_of ${instance}.ftrace.filters \
${instancedir}/set_ftrace_filter
diff --git a/tools/bootconfig/scripts/ftrace2bconf.sh b/tools/bootconfig/scripts/ftrace2bconf.sh
index 6c0d4b61e0c2..a0c3bcc6da4f 100755
--- a/tools/bootconfig/scripts/ftrace2bconf.sh
+++ b/tools/bootconfig/scripts/ftrace2bconf.sh
@@ -221,6 +221,10 @@ instance_options() { # [instance-name]
if [ `echo $val | sed -e s/f//g`x != x ]; then
emit_kv $PREFIX.cpumask = $val
fi
+ val=`cat $INSTANCE/tracing_on`
+ if [ `echo $val | sed -e s/f//g`x != x ]; then
+ emit_kv $PREFIX.tracing_on = $val
+ fi
val=
for i in `cat $INSTANCE/set_event`; do
diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
index 3fae61ef6339..ff3aa0cf3997 100644
--- a/tools/bpf/bpftool/net.c
+++ b/tools/bpf/bpftool/net.c
@@ -11,7 +11,6 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <net/if.h>
-#include <linux/if.h>
#include <linux/rtnetlink.h>
#include <linux/socket.h>
#include <linux/tc_act/tc_bpf.h>
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index e3ea569ee125..7409d7860aa6 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -139,6 +139,8 @@ int eprintf(int level, int var, const char *fmt, ...)
#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...) \
eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+ eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
static bool is_btf_id(const char *name)
{
@@ -472,7 +474,7 @@ static int symbols_resolve(struct object *obj)
int nr_funcs = obj->nr_funcs;
int err, type_id;
struct btf *btf;
- __u32 nr;
+ __u32 nr_types;
btf = btf__parse(obj->btf ?: obj->path, NULL);
err = libbpf_get_error(btf);
@@ -483,12 +485,12 @@ static int symbols_resolve(struct object *obj)
}
err = -1;
- nr = btf__get_nr_types(btf);
+ nr_types = btf__get_nr_types(btf);
/*
* Iterate all the BTF types and search for collected symbol IDs.
*/
- for (type_id = 1; type_id <= nr; type_id++) {
+ for (type_id = 1; type_id <= nr_types; type_id++) {
const struct btf_type *type;
struct rb_root *root;
struct btf_id *id;
@@ -526,8 +528,13 @@ static int symbols_resolve(struct object *obj)
id = btf_id__find(root, str);
if (id) {
- id->id = type_id;
- (*nr)--;
+ if (id->id) {
+ pr_info("WARN: multiple IDs found for '%s': %d, %d - using %d\n",
+ str, id->id, type_id, id->id);
+ } else {
+ id->id = type_id;
+ (*nr)--;
+ }
}
}
diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
index cacd66ad7926..a2b233fdb572 100644
--- a/tools/gpio/gpio-event-mon.c
+++ b/tools/gpio/gpio-event-mon.c
@@ -107,8 +107,8 @@ int monitor_device(const char *device_name,
ret = -EIO;
break;
}
- fprintf(stdout, "GPIO EVENT at %llu on line %d (%d|%d) ",
- event.timestamp_ns, event.offset, event.line_seqno,
+ fprintf(stdout, "GPIO EVENT at %" PRIu64 " on line %d (%d|%d) ",
+ (uint64_t)event.timestamp_ns, event.offset, event.line_seqno,
event.seqno);
switch (event.id) {
case GPIO_V2_LINE_EVENT_RISING_EDGE:
diff --git a/tools/gpio/gpio-watch.c b/tools/gpio/gpio-watch.c
index f229ec62301b..41e76d244192 100644
--- a/tools/gpio/gpio-watch.c
+++ b/tools/gpio/gpio-watch.c
@@ -10,6 +10,7 @@
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
+#include <inttypes.h>
#include <linux/gpio.h>
#include <poll.h>
#include <stdbool.h>
@@ -86,8 +87,8 @@ int main(int argc, char **argv)
return EXIT_FAILURE;
}
- printf("line %u: %s at %llu\n",
- chg.info.offset, event, chg.timestamp_ns);
+ printf("line %u: %s at %" PRIu64 "\n",
+ chg.info.offset, event, (uint64_t)chg.timestamp_ns);
}
}
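Background for the two format-string fixes above: the GPIO uAPI structs declare timestamp_ns as __u64, whose underlying C type (unsigned long vs. unsigned long long) can differ across architectures and toolchains, so a hard-coded %llu may trigger -Wformat warnings. Casting to uint64_t and printing with PRIu64 keeps the format and the argument in sync everywhere. A minimal stand-alone illustration (hypothetical helper, not part of the patch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <linux/types.h>	/* __u64, as used by the GPIO uAPI structs */

/* Illustrative: print a kernel-provided 64-bit timestamp portably. */
static void print_timestamp(__u64 timestamp_ns)
{
	/* the cast matches the argument to PRIu64 regardless of how
	 * __u64 happens to be defined for this build */
	printf("event at %" PRIu64 " ns\n", (uint64_t)timestamp_ns);
}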
diff --git a/tools/include/linux/build_bug.h b/tools/include/linux/build_bug.h
index ce365d212768..cc7070c7439b 100644
--- a/tools/include/linux/build_bug.h
+++ b/tools/include/linux/build_bug.h
@@ -79,9 +79,4 @@
#define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
#endif // static_assert
-#ifdef __GENKSYMS__
-/* genksyms gets confused by _Static_assert */
-#define _Static_assert(expr, ...)
-#endif
-
#endif /* _LINUX_BUILD_BUG_H */
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 886802b8ffba..374c67875cdb 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -251,6 +251,7 @@ struct kvm_hyperv_exit {
#define KVM_EXIT_X86_RDMSR 29
#define KVM_EXIT_X86_WRMSR 30
#define KVM_EXIT_DIRTY_RING_FULL 31
+#define KVM_EXIT_AP_RESET_HOLD 32
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -573,6 +574,7 @@ struct kvm_vapic_addr {
#define KVM_MP_STATE_CHECK_STOP 6
#define KVM_MP_STATE_OPERATING 7
#define KVM_MP_STATE_LOAD 8
+#define KVM_MP_STATE_AP_RESET_HOLD 9
struct kvm_mp_state {
__u32 mp_state;
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 3c3f2bc6c652..9970a288dda5 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -240,11 +240,6 @@ static int btf_parse_hdr(struct btf *btf)
}
meta_left = btf->raw_size - sizeof(*hdr);
- if (!meta_left) {
- pr_debug("BTF has no data\n");
- return -EINVAL;
- }
-
if (meta_left < hdr->str_off + hdr->str_len) {
pr_debug("Invalid BTF total size:%u\n", btf->raw_size);
return -EINVAL;
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index cfcdbd7be066..17465d454a0e 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -367,21 +367,13 @@ static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, boo
return map;
}
-static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
- struct perf_evsel *evsel, int idx, int cpu,
- int thread)
+static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
{
struct perf_sample_id *sid = SID(evsel, cpu, thread);
sid->idx = idx;
- if (evlist->cpus && cpu >= 0)
- sid->cpu = evlist->cpus->map[cpu];
- else
- sid->cpu = -1;
- if (!evsel->system_wide && evlist->threads && thread >= 0)
- sid->tid = perf_thread_map__pid(evlist->threads, thread);
- else
- sid->tid = -1;
+ sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
+ sid->tid = perf_thread_map__pid(evsel->threads, thread);
}
static struct perf_mmap*
@@ -500,8 +492,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
fd) < 0)
return -1;
- perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
- thread);
+ perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
}
}
diff --git a/tools/lib/perf/tests/test-cpumap.c b/tools/lib/perf/tests/test-cpumap.c
index c8d45091e7c2..c70e9e03af3e 100644
--- a/tools/lib/perf/tests/test-cpumap.c
+++ b/tools/lib/perf/tests/test-cpumap.c
@@ -27,5 +27,5 @@ int main(int argc, char **argv)
perf_cpu_map__put(cpus);
__T_END;
- return 0;
+ return tests_failed == 0 ? 0 : -1;
}
diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c
index 6d8ebe0c2504..e2ac0b7f432e 100644
--- a/tools/lib/perf/tests/test-evlist.c
+++ b/tools/lib/perf/tests/test-evlist.c
@@ -208,13 +208,13 @@ static int test_mmap_thread(void)
char path[PATH_MAX];
int id, err, pid, go_pipe[2];
union perf_event *event;
- char bf;
int count = 0;
snprintf(path, PATH_MAX, "%s/kernel/debug/tracing/events/syscalls/sys_enter_prctl/id",
sysfs__mountpoint());
if (filename__read_int(path, &id)) {
+ tests_failed++;
fprintf(stderr, "error: failed to get tracepoint id: %s\n", path);
return -1;
}
@@ -229,6 +229,7 @@ static int test_mmap_thread(void)
pid = fork();
if (!pid) {
int i;
+ char bf;
read(go_pipe[0], &bf, 1);
@@ -266,7 +267,7 @@ static int test_mmap_thread(void)
perf_evlist__enable(evlist);
/* kick the child and wait for it to finish */
- write(go_pipe[1], &bf, 1);
+ write(go_pipe[1], "A", 1);
waitpid(pid, NULL, 0);
/*
@@ -409,5 +410,5 @@ int main(int argc, char **argv)
test_mmap_cpus();
__T_END;
- return 0;
+ return tests_failed == 0 ? 0 : -1;
}
diff --git a/tools/lib/perf/tests/test-evsel.c b/tools/lib/perf/tests/test-evsel.c
index 135722ac965b..0ad82d7a2a51 100644
--- a/tools/lib/perf/tests/test-evsel.c
+++ b/tools/lib/perf/tests/test-evsel.c
@@ -131,5 +131,5 @@ int main(int argc, char **argv)
test_stat_thread_enable();
__T_END;
- return 0;
+ return tests_failed == 0 ? 0 : -1;
}
diff --git a/tools/lib/perf/tests/test-threadmap.c b/tools/lib/perf/tests/test-threadmap.c
index 7dc4d6fbedde..384471441b48 100644
--- a/tools/lib/perf/tests/test-threadmap.c
+++ b/tools/lib/perf/tests/test-threadmap.c
@@ -27,5 +27,5 @@ int main(int argc, char **argv)
perf_thread_map__put(threads);
__T_END;
- return 0;
+ return tests_failed == 0 ? 0 : -1;
}
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 5f8d3eed78a1..4bd30315eb62 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -2928,14 +2928,10 @@ int check(struct objtool_file *file)
warnings += ret;
out:
- if (ret < 0) {
- /*
- * Fatal error. The binary is corrupt or otherwise broken in
- * some way, or objtool itself is broken. Fail the kernel
- * build.
- */
- return ret;
- }
-
+ /*
+ * For now, don't fail the kernel build on fatal warnings. These
+ * errors are still fairly common due to the growing matrix of
+ * supported toolchains and their recent pace of change.
+ */
return 0;
}
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index be89c741ba9a..d8421e1d06be 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -380,8 +380,11 @@ static int read_symbols(struct elf *elf)
symtab = find_section_by_name(elf, ".symtab");
if (!symtab) {
- WARN("missing symbol table");
- return -1;
+ /*
+ * A missing symbol table is actually possible if it's an empty
+ * .o file. This can happen for thunk_64.o.
+ */
+ return 0;
}
symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
@@ -448,6 +451,13 @@ static int read_symbols(struct elf *elf)
list_add(&sym->list, entry);
elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
+
+ /*
+ * Don't store empty STT_NOTYPE symbols in the rbtree. They
+ * can exist within a function, confusing the sorting.
+ */
+ if (!sym->len)
+ rb_erase(&sym->node, &sym->sec->symbol_tree);
}
if (stats)
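The rb_erase() added above keeps zero-length STT_NOTYPE symbols out of the per-section symbol tree: objtool's offset lookups treat the tree nodes as non-overlapping [offset, offset + len) ranges, and a zero-length marker sitting at or inside a real function's range can make the search stop at the wrong node. A toy sketch of the containment test such lookups rely on (illustrative struct and helper, not objtool's code):

/* Illustrative: a len == 0 entry can never contain any offset, yet it
 * still takes part in the tree ordering if left in place, which is
 * what confuses the lookup.
 */
struct demo_sym {
	unsigned long offset;
	unsigned long len;
};

static int demo_sym_contains(const struct demo_sym *sym, unsigned long offset)
{
	return offset >= sym->offset && offset < sym->offset + sym->len;
}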
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index edacfa98d073..42dad4a0f8cf 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -186,6 +186,7 @@ struct output_option {
enum {
OUTPUT_TYPE_SYNTH = PERF_TYPE_MAX,
+ OUTPUT_TYPE_OTHER,
OUTPUT_TYPE_MAX
};
@@ -283,6 +284,18 @@ static struct {
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
+
+ [OUTPUT_TYPE_OTHER] = {
+ .user_set = false,
+
+ .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
+ PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
+ PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+ PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+ PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
+
+ .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
+ },
};
struct evsel_script {
@@ -343,8 +356,11 @@ static inline int output_type(unsigned int type)
case PERF_TYPE_SYNTH:
return OUTPUT_TYPE_SYNTH;
default:
- return type;
+ if (type < PERF_TYPE_MAX)
+ return type;
}
+
+ return OUTPUT_TYPE_OTHER;
}
static inline unsigned int attr_type(unsigned int type)
diff --git a/tools/perf/examples/bpf/5sec.c b/tools/perf/examples/bpf/5sec.c
index 65c4ff6892d9..e6b6181c6dc6 100644
--- a/tools/perf/examples/bpf/5sec.c
+++ b/tools/perf/examples/bpf/5sec.c
@@ -39,7 +39,7 @@
Copyright (C) 2018 Red Hat, Inc., Arnaldo Carvalho de Melo <[email protected]>
*/
-#include <bpf/bpf.h>
+#include <bpf.h>
#define NSEC_PER_SEC 1000000000L
diff --git a/tools/perf/tests/shell/stat+shadow_stat.sh b/tools/perf/tests/shell/stat+shadow_stat.sh
index 249dfe48cf6a..ebebd3596cf9 100755
--- a/tools/perf/tests/shell/stat+shadow_stat.sh
+++ b/tools/perf/tests/shell/stat+shadow_stat.sh
@@ -9,31 +9,29 @@ perf stat -a true > /dev/null 2>&1 || exit 2
test_global_aggr()
{
- local cyc
-
perf stat -a --no-big-num -e cycles,instructions sleep 1 2>&1 | \
grep -e cycles -e instructions | \
while read num evt hash ipc rest
do
# skip not counted events
- if [[ $num == "<not" ]]; then
+ if [ "$num" = "<not" ]; then
continue
fi
# save cycles count
- if [[ $evt == "cycles" ]]; then
+ if [ "$evt" = "cycles" ]; then
cyc=$num
continue
fi
# skip if no cycles
- if [[ -z $cyc ]]; then
+ if [ -z "$cyc" ]; then
continue
fi
# use printf for rounding and a leading zero
- local res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
- if [[ $ipc != $res ]]; then
+ res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
+ if [ "$ipc" != "$res" ]; then
echo "IPC is different: $res != $ipc ($num / $cyc)"
exit 1
fi
@@ -42,32 +40,32 @@ test_global_aggr()
test_no_aggr()
{
- declare -A results
-
perf stat -a -A --no-big-num -e cycles,instructions sleep 1 2>&1 | \
grep ^CPU | \
while read cpu num evt hash ipc rest
do
# skip not counted events
- if [[ $num == "<not" ]]; then
+ if [ "$num" = "<not" ]; then
continue
fi
# save cycles count
- if [[ $evt == "cycles" ]]; then
- results[$cpu]=$num
+ if [ "$evt" = "cycles" ]; then
+ results="$results $cpu:$num"
continue
fi
+ cyc=${results##* $cpu:}
+ cyc=${cyc%% *}
+
# skip if no cycles
- local cyc=${results[$cpu]}
- if [[ -z $cyc ]]; then
+ if [ -z "$cyc" ]; then
continue
fi
# use printf for rounding and a leading zero
- local res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
- if [[ $ipc != $res ]]; then
+ res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
+ if [ "$ipc" != "$res" ]; then
echo "IPC is different for $cpu: $res != $ipc ($num / $cyc)"
exit 1
fi
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 062383e225a3..c4ed3dc2c8f4 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3323,6 +3323,14 @@ int perf_session__write_header(struct perf_session *session,
attr_offset = lseek(ff.fd, 0, SEEK_CUR);
evlist__for_each_entry(evlist, evsel) {
+ if (evsel->core.attr.size < sizeof(evsel->core.attr)) {
+ /*
+ * We are likely in "perf inject" and have read
+ * from an older file. Update attr size so that
+ * reader gets the right offset to the ids.
+ */
+ evsel->core.attr.size = sizeof(evsel->core.attr);
+ }
f_attr = (struct perf_file_attr){
.attr = evsel->core.attr,
.ids = {
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index f841f3503cae..1e9d3f982b47 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -2980,7 +2980,7 @@ int machines__for_each_thread(struct machines *machines,
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
- int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
+ int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
return -1;
@@ -2992,7 +2992,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
pid_t tid)
{
struct thread *thread;
- int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
+ int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
if (cpu < 0)
return -EINVAL;
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index ee94d3e8dd65..e6d3452031e5 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -162,6 +162,14 @@ static bool contains_event(struct evsel **metric_events, int num_events,
return false;
}
+static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
+{
+ if (!ev1->pmu_name || !ev2->pmu_name)
+ return false;
+
+ return !strcmp(ev1->pmu_name, ev2->pmu_name);
+}
+
/**
* Find a group of events in perf_evlist that correspond to those from a parsed
* metric expression. Note, as find_evsel_group is called in the same order as
@@ -280,8 +288,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
*/
if (!has_constraint &&
ev->leader != metric_events[i]->leader &&
- !strcmp(ev->leader->pmu_name,
- metric_events[i]->leader->pmu_name))
+ evsel_same_pmu(ev->leader, metric_events[i]->leader))
break;
if (!strcmp(metric_events[i]->name, ev->name)) {
set_bit(ev->idx, evlist_used);
@@ -766,7 +773,6 @@ int __weak arch_get_runtimeparam(struct pmu_event *pe __maybe_unused)
struct metricgroup_add_iter_data {
struct list_head *metric_list;
const char *metric;
- struct metric **m;
struct expr_ids *ids;
int *ret;
bool *has_match;
@@ -1058,12 +1064,13 @@ static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
void *data)
{
struct metricgroup_add_iter_data *d = data;
+ struct metric *m = NULL;
int ret;
if (!match_pe_metric(pe, d->metric))
return 0;
- ret = add_metric(d->metric_list, pe, d->metric_no_group, d->m, NULL, d->ids);
+ ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
if (ret)
return ret;
@@ -1114,7 +1121,6 @@ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
.metric_list = &list,
.metric = metric,
.metric_no_group = metric_no_group,
- .m = &m,
.ids = &ids,
.has_match = &has_match,
.ret = &ret,
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 50ff9795a4f1..25adbcce0281 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -2404,7 +2404,7 @@ int perf_session__cpu_bitmap(struct perf_session *session,
{
int i, err = -1;
struct perf_cpu_map *map;
- int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);
+ int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
for (i = 0; i < PERF_TYPE_MAX; ++i) {
struct evsel *evsel;
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 901265127e36..12eafd12a693 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -8,6 +8,7 @@
#include "evlist.h"
#include "expr.h"
#include "metricgroup.h"
+#include "cgroup.h"
#include <linux/zalloc.h>
/*
@@ -28,6 +29,7 @@ struct saved_value {
enum stat_type type;
int ctx;
int cpu;
+ struct cgroup *cgrp;
struct runtime_stat *stat;
struct stats stats;
u64 metric_total;
@@ -57,6 +59,9 @@ static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
if (a->ctx != b->ctx)
return a->ctx - b->ctx;
+ if (a->cgrp != b->cgrp)
+ return (char *)a->cgrp < (char *)b->cgrp ? -1 : +1;
+
if (a->evsel == NULL && b->evsel == NULL) {
if (a->stat == b->stat)
return 0;
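A note on the cgrp comparison added to saved_value_cmp() just above: the cgroup pointer becomes part of the rb-tree key by comparing raw addresses. That ordering is arbitrary, but it only needs to be consistent, so that lookups with the same (evsel, cpu, type, ctx, cgrp) tuple always land on the same node. A stand-alone sketch of the idiom (illustrative struct, not the perf code):

/* Illustrative: use pointer identity as a key component.  The order is
 * meaningless but stable for the objects' lifetime, which is all a
 * search-tree key needs.
 */
struct demo_key {
	int ctx;
	const void *cgrp;	/* NULL means "no cgroup" */
};

static int demo_key_cmp(const struct demo_key *a, const struct demo_key *b)
{
	if (a->ctx != b->ctx)
		return a->ctx - b->ctx;
	if (a->cgrp != b->cgrp)
		return (const char *)a->cgrp < (const char *)b->cgrp ? -1 : 1;
	return 0;
}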
@@ -100,7 +105,8 @@ static struct saved_value *saved_value_lookup(struct evsel *evsel,
bool create,
enum stat_type type,
int ctx,
- struct runtime_stat *st)
+ struct runtime_stat *st,
+ struct cgroup *cgrp)
{
struct rblist *rblist;
struct rb_node *nd;
@@ -110,10 +116,15 @@ static struct saved_value *saved_value_lookup(struct evsel *evsel,
.type = type,
.ctx = ctx,
.stat = st,
+ .cgrp = cgrp,
};
rblist = &st->value_list;
+ /* don't use context info for clock events */
+ if (type == STAT_NSECS)
+ dm.ctx = 0;
+
nd = rblist__find(rblist, &dm);
if (nd)
return container_of(nd, struct saved_value, rb_node);
@@ -191,12 +202,18 @@ void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
reset_stat(st);
}
+struct runtime_stat_data {
+ int ctx;
+ struct cgroup *cgrp;
+};
+
static void update_runtime_stat(struct runtime_stat *st,
enum stat_type type,
- int ctx, int cpu, u64 count)
+ int cpu, u64 count,
+ struct runtime_stat_data *rsd)
{
- struct saved_value *v = saved_value_lookup(NULL, cpu, true,
- type, ctx, st);
+ struct saved_value *v = saved_value_lookup(NULL, cpu, true, type,
+ rsd->ctx, st, rsd->cgrp);
if (v)
update_stats(&v->stats, count);
@@ -210,82 +227,86 @@ static void update_runtime_stat(struct runtime_stat *st,
void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
int cpu, struct runtime_stat *st)
{
- int ctx = evsel_context(counter);
u64 count_ns = count;
struct saved_value *v;
+ struct runtime_stat_data rsd = {
+ .ctx = evsel_context(counter),
+ .cgrp = counter->cgrp,
+ };
count *= counter->scale;
if (evsel__is_clock(counter))
- update_runtime_stat(st, STAT_NSECS, 0, cpu, count_ns);
+ update_runtime_stat(st, STAT_NSECS, cpu, count_ns, &rsd);
else if (evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
- update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
+ update_runtime_stat(st, STAT_CYCLES, cpu, count, &rsd);
else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
- update_runtime_stat(st, STAT_CYCLES_IN_TX, ctx, cpu, count);
+ update_runtime_stat(st, STAT_CYCLES_IN_TX, cpu, count, &rsd);
else if (perf_stat_evsel__is(counter, TRANSACTION_START))
- update_runtime_stat(st, STAT_TRANSACTION, ctx, cpu, count);
+ update_runtime_stat(st, STAT_TRANSACTION, cpu, count, &rsd);
else if (perf_stat_evsel__is(counter, ELISION_START))
- update_runtime_stat(st, STAT_ELISION, ctx, cpu, count);
+ update_runtime_stat(st, STAT_ELISION, cpu, count, &rsd);
else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS,
- ctx, cpu, count);
+ cpu, count, &rsd);
else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED,
- ctx, cpu, count);
+ cpu, count, &rsd);
else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED,
- ctx, cpu, count);
+ cpu, count, &rsd);
else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES,
- ctx, cpu, count);
+ cpu, count, &rsd);
else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
- ctx, cpu, count);
+ cpu, count, &rsd);
else if (perf_stat_evsel__is(counter, TOPDOWN_RETIRING))
update_runtime_stat(st, STAT_TOPDOWN_RETIRING,
- ctx, cpu, count);
+ cpu, count, &rsd);
else if (perf_stat_evsel__is(counter, TOPDOWN_BAD_SPEC))
update_runtime_stat(st, STAT_TOPDOWN_BAD_SPEC,
- ctx, cpu, count);
+ cpu, count, &rsd);
else if (perf_stat_evsel__is(counter, TOPDOWN_FE_BOUND))
update_runtime_stat(st, STAT_TOPDOWN_FE_BOUND,
- ctx, cpu, count);
+ cpu, count, &rsd);
else if (perf_stat_evsel__is(counter, TOPDOWN_BE_BOUND))
update_runtime_stat(st, STAT_TOPDOWN_BE_BOUND,
- ctx, cpu, count);
+ cpu, count, &rsd);
else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
- ctx, cpu, count);
+ cpu, count, &rsd);
else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
- ctx, cpu, count);
+ cpu, count, &rsd);
else if (evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
- update_runtime_stat(st, STAT_BRANCHES, ctx, cpu, count);
+ update_runtime_stat(st, STAT_BRANCHES, cpu, count, &rsd);
else if (evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
- update_runtime_stat(st, STAT_CACHEREFS, ctx, cpu, count);
+ update_runtime_stat(st, STAT_CACHEREFS, cpu, count, &rsd);
else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
- update_runtime_stat(st, STAT_L1_DCACHE, ctx, cpu, count);
+ update_runtime_stat(st, STAT_L1_DCACHE, cpu, count, &rsd);
else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
- update_runtime_stat(st, STAT_L1_ICACHE, ctx, cpu, count);
+ update_runtime_stat(st, STAT_L1_ICACHE, cpu, count, &rsd);
else if (evsel__match(counter, HW_CACHE, HW_CACHE_LL))
- update_runtime_stat(st, STAT_LL_CACHE, ctx, cpu, count);
+ update_runtime_stat(st, STAT_LL_CACHE, cpu, count, &rsd);
else if (evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
- update_runtime_stat(st, STAT_DTLB_CACHE, ctx, cpu, count);
+ update_runtime_stat(st, STAT_DTLB_CACHE, cpu, count, &rsd);
else if (evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
- update_runtime_stat(st, STAT_ITLB_CACHE, ctx, cpu, count);
+ update_runtime_stat(st, STAT_ITLB_CACHE, cpu, count, &rsd);
else if (perf_stat_evsel__is(counter, SMI_NUM))
- update_runtime_stat(st, STAT_SMI_NUM, ctx, cpu, count);
+ update_runtime_stat(st, STAT_SMI_NUM, cpu, count, &rsd);
else if (perf_stat_evsel__is(counter, APERF))
- update_runtime_stat(st, STAT_APERF, ctx, cpu, count);
+ update_runtime_stat(st, STAT_APERF, cpu, count, &rsd);
if (counter->collect_stat) {
- v = saved_value_lookup(counter, cpu, true, STAT_NONE, 0, st);
+ v = saved_value_lookup(counter, cpu, true, STAT_NONE, 0, st,
+ rsd.cgrp);
update_stats(&v->stats, count);
if (counter->metric_leader)
v->metric_total += count;
} else if (counter->metric_leader) {
v = saved_value_lookup(counter->metric_leader,
- cpu, true, STAT_NONE, 0, st);
+ cpu, true, STAT_NONE, 0, st, rsd.cgrp);
v->metric_total += count;
v->metric_other++;
}
@@ -422,11 +443,12 @@ void perf_stat__collect_metric_expr(struct evlist *evsel_list)
}
static double runtime_stat_avg(struct runtime_stat *st,
- enum stat_type type, int ctx, int cpu)
+ enum stat_type type, int cpu,
+ struct runtime_stat_data *rsd)
{
struct saved_value *v;
- v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
+ v = saved_value_lookup(NULL, cpu, false, type, rsd->ctx, st, rsd->cgrp);
if (!v)
return 0.0;
@@ -434,11 +456,12 @@ static double runtime_stat_avg(struct runtime_stat *st,
}
static double runtime_stat_n(struct runtime_stat *st,
- enum stat_type type, int ctx, int cpu)
+ enum stat_type type, int cpu,
+ struct runtime_stat_data *rsd)
{
struct saved_value *v;
- v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
+ v = saved_value_lookup(NULL, cpu, false, type, rsd->ctx, st, rsd->cgrp);
if (!v)
return 0.0;
@@ -446,16 +469,15 @@ static double runtime_stat_n(struct runtime_stat *st,
}
static void print_stalled_cycles_frontend(struct perf_stat_config *config,
- int cpu,
- struct evsel *evsel, double avg,
+ int cpu, double avg,
struct perf_stat_output_ctx *out,
- struct runtime_stat *st)
+ struct runtime_stat *st,
+ struct runtime_stat_data *rsd)
{
double total, ratio = 0.0;
const char *color;
- int ctx = evsel_context(evsel);
- total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+ total = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd);
if (total)
ratio = avg / total * 100.0;
@@ -470,16 +492,15 @@ static void print_stalled_cycles_frontend(struct perf_stat_config *config,
}
static void print_stalled_cycles_backend(struct perf_stat_config *config,
- int cpu,
- struct evsel *evsel, double avg,
+ int cpu, double avg,
struct perf_stat_output_ctx *out,
- struct runtime_stat *st)
+ struct runtime_stat *st,
+ struct runtime_stat_data *rsd)
{
double total, ratio = 0.0;
const char *color;
- int ctx = evsel_context(evsel);
- total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+ total = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd);
if (total)
ratio = avg / total * 100.0;
@@ -490,17 +511,15 @@ static void print_stalled_cycles_backend(struct perf_stat_config *config,
}
static void print_branch_misses(struct perf_stat_config *config,
- int cpu,
- struct evsel *evsel,
- double avg,
+ int cpu, double avg,
struct perf_stat_output_ctx *out,
- struct runtime_stat *st)
+ struct runtime_stat *st,
+ struct runtime_stat_data *rsd)
{
double total, ratio = 0.0;
const char *color;
- int ctx = evsel_context(evsel);
- total = runtime_stat_avg(st, STAT_BRANCHES, ctx, cpu);
+ total = runtime_stat_avg(st, STAT_BRANCHES, cpu, rsd);
if (total)
ratio = avg / total * 100.0;
@@ -511,18 +530,15 @@ static void print_branch_misses(struct perf_stat_config *config,
}
static void print_l1_dcache_misses(struct perf_stat_config *config,
- int cpu,
- struct evsel *evsel,
- double avg,
+ int cpu, double avg,
struct perf_stat_output_ctx *out,
- struct runtime_stat *st)
-
+ struct runtime_stat *st,
+ struct runtime_stat_data *rsd)
{
double total, ratio = 0.0;
const char *color;
- int ctx = evsel_context(evsel);
- total = runtime_stat_avg(st, STAT_L1_DCACHE, ctx, cpu);
+ total = runtime_stat_avg(st, STAT_L1_DCACHE, cpu, rsd);
if (total)
ratio = avg / total * 100.0;
@@ -533,18 +549,15 @@ static void print_l1_dcache_misses(struct perf_stat_config *config,
}
static void print_l1_icache_misses(struct perf_stat_config *config,
- int cpu,
- struct evsel *evsel,
- double avg,
+ int cpu, double avg,
struct perf_stat_output_ctx *out,
- struct runtime_stat *st)
-
+ struct runtime_stat *st,
+ struct runtime_stat_data *rsd)
{
double total, ratio = 0.0;
const char *color;
- int ctx = evsel_context(evsel);
- total = runtime_stat_avg(st, STAT_L1_ICACHE, ctx, cpu);
+ total = runtime_stat_avg(st, STAT_L1_ICACHE, cpu, rsd);
if (total)
ratio = avg / total * 100.0;
@@ -554,17 +567,15 @@ static void print_l1_icache_misses(struct perf_stat_config *config,
}
static void print_dtlb_cache_misses(struct perf_stat_config *config,
- int cpu,
- struct evsel *evsel,
- double avg,
+ int cpu, double avg,
struct perf_stat_output_ctx *out,
- struct runtime_stat *st)
+ struct runtime_stat *st,
+ struct runtime_stat_data *rsd)
{
double total, ratio = 0.0;
const char *color;
- int ctx = evsel_context(evsel);
- total = runtime_stat_avg(st, STAT_DTLB_CACHE, ctx, cpu);
+ total = runtime_stat_avg(st, STAT_DTLB_CACHE, cpu, rsd);
if (total)
ratio = avg / total * 100.0;
@@ -574,17 +585,15 @@ static void print_dtlb_cache_misses(struct perf_stat_config *config,
}
static void print_itlb_cache_misses(struct perf_stat_config *config,
- int cpu,
- struct evsel *evsel,
- double avg,
+ int cpu, double avg,
struct perf_stat_output_ctx *out,
- struct runtime_stat *st)
+ struct runtime_stat *st,
+ struct runtime_stat_data *rsd)
{
double total, ratio = 0.0;
const char *color;
- int ctx = evsel_context(evsel);
- total = runtime_stat_avg(st, STAT_ITLB_CACHE, ctx, cpu);
+ total = runtime_stat_avg(st, STAT_ITLB_CACHE, cpu, rsd);
if (total)
ratio = avg / total * 100.0;
@@ -594,17 +603,15 @@ static void print_itlb_cache_misses(struct perf_stat_config *config,
}
static void print_ll_cache_misses(struct perf_stat_config *config,
- int cpu,
- struct evsel *evsel,
- double avg,
+ int cpu, double avg,
struct perf_stat_output_ctx *out,
- struct runtime_stat *st)
+ struct runtime_stat *st,
+ struct runtime_stat_data *rsd)
{
double total, ratio = 0.0;
const char *color;
- int ctx = evsel_context(evsel);
- total = runtime_stat_avg(st, STAT_LL_CACHE, ctx, cpu);
+ total = runtime_stat_avg(st, STAT_LL_CACHE, cpu, rsd);
if (total)
ratio = avg / total * 100.0;
@@ -662,56 +669,61 @@ static double sanitize_val(double x)
return x;
}
-static double td_total_slots(int ctx, int cpu, struct runtime_stat *st)
+static double td_total_slots(int cpu, struct runtime_stat *st,
+ struct runtime_stat_data *rsd)
{
- return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, ctx, cpu);
+ return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, cpu, rsd);
}
-static double td_bad_spec(int ctx, int cpu, struct runtime_stat *st)
+static double td_bad_spec(int cpu, struct runtime_stat *st,
+ struct runtime_stat_data *rsd)
{
double bad_spec = 0;
double total_slots;
double total;
- total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, ctx, cpu) -
- runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, ctx, cpu) +
- runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, ctx, cpu);
+ total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, cpu, rsd) -
+ runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, cpu, rsd) +
+ runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, cpu, rsd);
- total_slots = td_total_slots(ctx, cpu, st);
+ total_slots = td_total_slots(cpu, st, rsd);
if (total_slots)
bad_spec = total / total_slots;
return sanitize_val(bad_spec);
}
-static double td_retiring(int ctx, int cpu, struct runtime_stat *st)
+static double td_retiring(int cpu, struct runtime_stat *st,
+ struct runtime_stat_data *rsd)
{
double retiring = 0;
- double total_slots = td_total_slots(ctx, cpu, st);
+ double total_slots = td_total_slots(cpu, st, rsd);
double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED,
- ctx, cpu);
+ cpu, rsd);
if (total_slots)
retiring = ret_slots / total_slots;
return retiring;
}
-static double td_fe_bound(int ctx, int cpu, struct runtime_stat *st)
+static double td_fe_bound(int cpu, struct runtime_stat *st,
+ struct runtime_stat_data *rsd)
{
double fe_bound = 0;
- double total_slots = td_total_slots(ctx, cpu, st);
+ double total_slots = td_total_slots(cpu, st, rsd);
double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES,
- ctx, cpu);
+ cpu, rsd);
if (total_slots)
fe_bound = fetch_bub / total_slots;
return fe_bound;
}
-static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
+static double td_be_bound(int cpu, struct runtime_stat *st,
+ struct runtime_stat_data *rsd)
{
- double sum = (td_fe_bound(ctx, cpu, st) +
- td_bad_spec(ctx, cpu, st) +
- td_retiring(ctx, cpu, st));
+ double sum = (td_fe_bound(cpu, st, rsd) +
+ td_bad_spec(cpu, st, rsd) +
+ td_retiring(cpu, st, rsd));
if (sum == 0)
return 0;
return sanitize_val(1.0 - sum);
@@ -722,15 +734,15 @@ static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
* the ratios we need to recreate the sum.
*/
-static double td_metric_ratio(int ctx, int cpu,
- enum stat_type type,
- struct runtime_stat *stat)
+static double td_metric_ratio(int cpu, enum stat_type type,
+ struct runtime_stat *stat,
+ struct runtime_stat_data *rsd)
{
- double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, ctx, cpu) +
- runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, ctx, cpu) +
- runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, ctx, cpu) +
- runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, ctx, cpu);
- double d = runtime_stat_avg(stat, type, ctx, cpu);
+ double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu, rsd) +
+ runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu, rsd) +
+ runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu, rsd) +
+ runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu, rsd);
+ double d = runtime_stat_avg(stat, type, cpu, rsd);
if (sum)
return d / sum;
@@ -742,34 +754,33 @@ static double td_metric_ratio(int ctx, int cpu,
* We allow two missing.
*/
-static bool full_td(int ctx, int cpu,
- struct runtime_stat *stat)
+static bool full_td(int cpu, struct runtime_stat *stat,
+ struct runtime_stat_data *rsd)
{
int c = 0;
- if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, ctx, cpu) > 0)
+ if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu, rsd) > 0)
c++;
- if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, ctx, cpu) > 0)
+ if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu, rsd) > 0)
c++;
- if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, ctx, cpu) > 0)
+ if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu, rsd) > 0)
c++;
- if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, ctx, cpu) > 0)
+ if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu, rsd) > 0)
c++;
return c >= 2;
}
-static void print_smi_cost(struct perf_stat_config *config,
- int cpu, struct evsel *evsel,
+static void print_smi_cost(struct perf_stat_config *config, int cpu,
struct perf_stat_output_ctx *out,
- struct runtime_stat *st)
+ struct runtime_stat *st,
+ struct runtime_stat_data *rsd)
{
double smi_num, aperf, cycles, cost = 0.0;
- int ctx = evsel_context(evsel);
const char *color = NULL;
- smi_num = runtime_stat_avg(st, STAT_SMI_NUM, ctx, cpu);
- aperf = runtime_stat_avg(st, STAT_APERF, ctx, cpu);
- cycles = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+ smi_num = runtime_stat_avg(st, STAT_SMI_NUM, cpu, rsd);
+ aperf = runtime_stat_avg(st, STAT_APERF, cpu, rsd);
+ cycles = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd);
if ((cycles == 0) || (aperf == 0))
return;
@@ -804,7 +815,8 @@ static int prepare_metric(struct evsel **metric_events,
scale = 1e-9;
} else {
v = saved_value_lookup(metric_events[i], cpu, false,
- STAT_NONE, 0, st);
+ STAT_NONE, 0, st,
+ metric_events[i]->cgrp);
if (!v)
break;
stats = &v->stats;
@@ -930,12 +942,15 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
print_metric_t print_metric = out->print_metric;
double total, ratio = 0.0, total2;
const char *color = NULL;
- int ctx = evsel_context(evsel);
+ struct runtime_stat_data rsd = {
+ .ctx = evsel_context(evsel),
+ .cgrp = evsel->cgrp,
+ };
struct metric_event *me;
int num = 1;
if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
- total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+ total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);
if (total) {
ratio = avg / total;
@@ -945,12 +960,11 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
print_metric(config, ctxp, NULL, NULL, "insn per cycle", 0);
}
- total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT,
- ctx, cpu);
+ total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT, cpu, &rsd);
total = max(total, runtime_stat_avg(st,
STAT_STALLED_CYCLES_BACK,
- ctx, cpu));
+ cpu, &rsd));
if (total && avg) {
out->new_line(config, ctxp);
@@ -960,8 +974,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
ratio);
}
} else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
- if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
- print_branch_misses(config, cpu, evsel, avg, out, st);
+ if (runtime_stat_n(st, STAT_BRANCHES, cpu, &rsd) != 0)
+ print_branch_misses(config, cpu, avg, out, st, &rsd);
else
print_metric(config, ctxp, NULL, NULL, "of all branches", 0);
} else if (
@@ -970,8 +984,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_stat_n(st, STAT_L1_DCACHE, ctx, cpu) != 0)
- print_l1_dcache_misses(config, cpu, evsel, avg, out, st);
+ if (runtime_stat_n(st, STAT_L1_DCACHE, cpu, &rsd) != 0)
+ print_l1_dcache_misses(config, cpu, avg, out, st, &rsd);
else
print_metric(config, ctxp, NULL, NULL, "of all L1-dcache accesses", 0);
} else if (
@@ -980,8 +994,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_stat_n(st, STAT_L1_ICACHE, ctx, cpu) != 0)
- print_l1_icache_misses(config, cpu, evsel, avg, out, st);
+ if (runtime_stat_n(st, STAT_L1_ICACHE, cpu, &rsd) != 0)
+ print_l1_icache_misses(config, cpu, avg, out, st, &rsd);
else
print_metric(config, ctxp, NULL, NULL, "of all L1-icache accesses", 0);
} else if (
@@ -990,8 +1004,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_stat_n(st, STAT_DTLB_CACHE, ctx, cpu) != 0)
- print_dtlb_cache_misses(config, cpu, evsel, avg, out, st);
+ if (runtime_stat_n(st, STAT_DTLB_CACHE, cpu, &rsd) != 0)
+ print_dtlb_cache_misses(config, cpu, avg, out, st, &rsd);
else
print_metric(config, ctxp, NULL, NULL, "of all dTLB cache accesses", 0);
} else if (
@@ -1000,8 +1014,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_stat_n(st, STAT_ITLB_CACHE, ctx, cpu) != 0)
- print_itlb_cache_misses(config, cpu, evsel, avg, out, st);
+ if (runtime_stat_n(st, STAT_ITLB_CACHE, cpu, &rsd) != 0)
+ print_itlb_cache_misses(config, cpu, avg, out, st, &rsd);
else
print_metric(config, ctxp, NULL, NULL, "of all iTLB cache accesses", 0);
} else if (
@@ -1010,27 +1024,27 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_stat_n(st, STAT_LL_CACHE, ctx, cpu) != 0)
- print_ll_cache_misses(config, cpu, evsel, avg, out, st);
+ if (runtime_stat_n(st, STAT_LL_CACHE, cpu, &rsd) != 0)
+ print_ll_cache_misses(config, cpu, avg, out, st, &rsd);
else
print_metric(config, ctxp, NULL, NULL, "of all LL-cache accesses", 0);
} else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
- total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);
+ total = runtime_stat_avg(st, STAT_CACHEREFS, cpu, &rsd);
if (total)
ratio = avg * 100 / total;
- if (runtime_stat_n(st, STAT_CACHEREFS, ctx, cpu) != 0)
+ if (runtime_stat_n(st, STAT_CACHEREFS, cpu, &rsd) != 0)
print_metric(config, ctxp, NULL, "%8.3f %%",
"of all cache refs", ratio);
else
print_metric(config, ctxp, NULL, NULL, "of all cache refs", 0);
} else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
- print_stalled_cycles_frontend(config, cpu, evsel, avg, out, st);
+ print_stalled_cycles_frontend(config, cpu, avg, out, st, &rsd);
} else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
- print_stalled_cycles_backend(config, cpu, evsel, avg, out, st);
+ print_stalled_cycles_backend(config, cpu, avg, out, st, &rsd);
} else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
- total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
+ total = runtime_stat_avg(st, STAT_NSECS, cpu, &rsd);
if (total) {
ratio = avg / total;
@@ -1039,7 +1053,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
print_metric(config, ctxp, NULL, NULL, "Ghz", 0);
}
} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
- total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+ total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);
if (total)
print_metric(config, ctxp, NULL,
@@ -1049,8 +1063,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
print_metric(config, ctxp, NULL, NULL, "transactional cycles",
0);
} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
- total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
- total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, ctx, cpu);
+ total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);
+ total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd);
if (total2 < avg)
total2 = avg;
@@ -1060,21 +1074,19 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
else
print_metric(config, ctxp, NULL, NULL, "aborted cycles", 0);
} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
- total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
- ctx, cpu);
+ total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd);
if (avg)
ratio = total / avg;
- if (runtime_stat_n(st, STAT_CYCLES_IN_TX, ctx, cpu) != 0)
+ if (runtime_stat_n(st, STAT_CYCLES_IN_TX, cpu, &rsd) != 0)
print_metric(config, ctxp, NULL, "%8.0f",
"cycles / transaction", ratio);
else
print_metric(config, ctxp, NULL, NULL, "cycles / transaction",
0);
} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
- total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
- ctx, cpu);
+ total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd);
if (avg)
ratio = total / avg;
@@ -1087,28 +1099,28 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
else
print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
- double fe_bound = td_fe_bound(ctx, cpu, st);
+ double fe_bound = td_fe_bound(cpu, st, &rsd);
if (fe_bound > 0.2)
color = PERF_COLOR_RED;
print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
fe_bound * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
- double retiring = td_retiring(ctx, cpu, st);
+ double retiring = td_retiring(cpu, st, &rsd);
if (retiring > 0.7)
color = PERF_COLOR_GREEN;
print_metric(config, ctxp, color, "%8.1f%%", "retiring",
retiring * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
- double bad_spec = td_bad_spec(ctx, cpu, st);
+ double bad_spec = td_bad_spec(cpu, st, &rsd);
if (bad_spec > 0.1)
color = PERF_COLOR_RED;
print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
bad_spec * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
- double be_bound = td_be_bound(ctx, cpu, st);
+ double be_bound = td_be_bound(cpu, st, &rsd);
const char *name = "backend bound";
static int have_recovery_bubbles = -1;
@@ -1121,43 +1133,43 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
if (be_bound > 0.2)
color = PERF_COLOR_RED;
- if (td_total_slots(ctx, cpu, st) > 0)
+ if (td_total_slots(cpu, st, &rsd) > 0)
print_metric(config, ctxp, color, "%8.1f%%", name,
be_bound * 100.);
else
print_metric(config, ctxp, NULL, NULL, name, 0);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_RETIRING) &&
- full_td(ctx, cpu, st)) {
- double retiring = td_metric_ratio(ctx, cpu,
- STAT_TOPDOWN_RETIRING, st);
-
+ full_td(cpu, st, &rsd)) {
+ double retiring = td_metric_ratio(cpu,
+ STAT_TOPDOWN_RETIRING, st,
+ &rsd);
if (retiring > 0.7)
color = PERF_COLOR_GREEN;
print_metric(config, ctxp, color, "%8.1f%%", "retiring",
retiring * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) &&
- full_td(ctx, cpu, st)) {
- double fe_bound = td_metric_ratio(ctx, cpu,
- STAT_TOPDOWN_FE_BOUND, st);
-
+ full_td(cpu, st, &rsd)) {
+ double fe_bound = td_metric_ratio(cpu,
+ STAT_TOPDOWN_FE_BOUND, st,
+ &rsd);
if (fe_bound > 0.2)
color = PERF_COLOR_RED;
print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
fe_bound * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) &&
- full_td(ctx, cpu, st)) {
- double be_bound = td_metric_ratio(ctx, cpu,
- STAT_TOPDOWN_BE_BOUND, st);
-
+ full_td(cpu, st, &rsd)) {
+ double be_bound = td_metric_ratio(cpu,
+ STAT_TOPDOWN_BE_BOUND, st,
+ &rsd);
if (be_bound > 0.2)
color = PERF_COLOR_RED;
print_metric(config, ctxp, color, "%8.1f%%", "backend bound",
be_bound * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) &&
- full_td(ctx, cpu, st)) {
- double bad_spec = td_metric_ratio(ctx, cpu,
- STAT_TOPDOWN_BAD_SPEC, st);
-
+ full_td(cpu, st, &rsd)) {
+ double bad_spec = td_metric_ratio(cpu,
+ STAT_TOPDOWN_BAD_SPEC, st,
+ &rsd);
if (bad_spec > 0.1)
color = PERF_COLOR_RED;
print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
@@ -1165,11 +1177,11 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
} else if (evsel->metric_expr) {
generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
evsel->name, evsel->metric_name, NULL, 1, cpu, out, st);
- } else if (runtime_stat_n(st, STAT_NSECS, 0, cpu) != 0) {
+ } else if (runtime_stat_n(st, STAT_NSECS, cpu, &rsd) != 0) {
char unit = 'M';
char unit_buf[10];
- total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
+ total = runtime_stat_avg(st, STAT_NSECS, cpu, &rsd);
if (total)
ratio = 1000.0 * avg / total;
@@ -1180,7 +1192,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
- print_smi_cost(config, cpu, evsel, out, st);
+ print_smi_cost(config, cpu, out, st, &rsd);
} else {
num = 0;
}
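
The perf hunks above fold the event context and the evsel's cgroup into a single struct runtime_stat_data, so every shadow-stat helper takes (cpu, rsd) instead of a growing parameter list, and per-cgroup events stop aliasing each other in the saved-value table. A minimal standalone sketch of that pattern, with illustrative names and values rather than perf's real definitions:

#include <stdio.h>

struct cgroup { const char *name; };	/* stand-in for perf's struct cgroup */

struct runtime_stat_data {
	int ctx;		/* event context, previously passed as a bare int */
	struct cgroup *cgrp;	/* extra lookup key so cgroup events do not alias */
};

/* Toy lookup keyed by (type, cpu, ctx, cgrp); the real code hashes these
 * into its saved_value rblist. */
static double runtime_stat_avg(int type, int cpu, const struct runtime_stat_data *rsd)
{
	return (double)(type + cpu + rsd->ctx) + (rsd->cgrp ? 0.5 : 0.0);
}

int main(void)
{
	struct cgroup grp = { .name = "system.slice" };
	struct runtime_stat_data rsd = { .ctx = 3, .cgrp = &grp };

	/* Callers build one rsd up front and pass it everywhere, as the diff does. */
	printf("avg = %.1f\n", runtime_stat_avg(1 /* e.g. STAT_CYCLES */, 0, &rsd));
	return 0;
}
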
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
index 5390158cdb40..09cb3a6672f3 100644
--- a/tools/power/x86/intel-speed-select/isst-config.c
+++ b/tools/power/x86/intel-speed-select/isst-config.c
@@ -1249,6 +1249,8 @@ static void dump_isst_config(int arg)
isst_ctdp_display_information_end(outf);
}
+static void adjust_scaling_max_from_base_freq(int cpu);
+
static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
void *arg4)
{
@@ -1267,6 +1269,9 @@ static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
int pkg_id = get_physical_package_id(cpu);
int die_id = get_physical_die_id(cpu);
+ /* Wait for updated base frequencies */
+ usleep(2000);
+
fprintf(stderr, "Option is set to online/offline\n");
ctdp_level.core_cpumask_size =
alloc_cpu_set(&ctdp_level.core_cpumask);
@@ -1283,6 +1288,7 @@ static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
if (CPU_ISSET_S(i, ctdp_level.core_cpumask_size, ctdp_level.core_cpumask)) {
fprintf(stderr, "online cpu %d\n", i);
set_cpu_online_offline(i, 1);
+ adjust_scaling_max_from_base_freq(i);
} else {
fprintf(stderr, "offline cpu %d\n", i);
set_cpu_online_offline(i, 0);
@@ -1440,6 +1446,31 @@ static int set_cpufreq_scaling_min_max(int cpu, int max, int freq)
return 0;
}
+static int no_turbo(void)
+{
+ return parse_int_file(0, "/sys/devices/system/cpu/intel_pstate/no_turbo");
+}
+
+static void adjust_scaling_max_from_base_freq(int cpu)
+{
+ int base_freq, scaling_max_freq;
+
+ scaling_max_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
+ base_freq = get_cpufreq_base_freq(cpu);
+ if (scaling_max_freq < base_freq || no_turbo())
+ set_cpufreq_scaling_min_max(cpu, 1, base_freq);
+}
+
+static void adjust_scaling_min_from_base_freq(int cpu)
+{
+ int base_freq, scaling_min_freq;
+
+ scaling_min_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq", cpu);
+ base_freq = get_cpufreq_base_freq(cpu);
+ if (scaling_min_freq < base_freq)
+ set_cpufreq_scaling_min_max(cpu, 0, base_freq);
+}
+
static int set_clx_pbf_cpufreq_scaling_min_max(int cpu)
{
struct isst_pkg_ctdp_level_info *ctdp_level;
@@ -1537,6 +1568,7 @@ static void set_scaling_min_to_cpuinfo_max(int cpu)
continue;
set_cpufreq_scaling_min_max_from_cpuinfo(i, 1, 0);
+ adjust_scaling_min_from_base_freq(i);
}
}
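
The intel-speed-select change above restores scaling_max_freq (and scaling_min_freq) to the base frequency after CPUs come back online, since cpufreq can be left with a stale, lower limit. A simplified standalone sketch of that check, using a plain sysfs read instead of the tool's parse_int_file()/get_cpufreq_base_freq() helpers (the real tool also raises the limit when intel_pstate/no_turbo is set):

#include <stdio.h>

static int read_sysfs_int(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

static void adjust_scaling_max(int cpu, int base_freq)
{
	char path[128];
	int scaling_max;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
	scaling_max = read_sysfs_int(path);
	if (scaling_max >= 0 && scaling_max < base_freq)
		printf("cpu%d: would raise scaling_max_freq %d -> %d\n",
		       cpu, scaling_max, base_freq);
}

int main(void)
{
	adjust_scaling_max(0, 2400000);	/* base frequency in kHz, illustrative */
	return 0;
}
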
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 21516e293d17..e808a47c839b 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -43,9 +43,9 @@ class KunitStatus(Enum):
BUILD_FAILURE = auto()
TEST_FAILURE = auto()
-def get_kernel_root_path():
- parts = sys.argv[0] if not __file__ else __file__
- parts = os.path.realpath(parts).split('tools/testing/kunit')
+def get_kernel_root_path() -> str:
+ path = sys.argv[0] if not __file__ else __file__
+ parts = os.path.realpath(path).split('tools/testing/kunit')
if len(parts) != 2:
sys.exit(1)
return parts[0]
@@ -171,7 +171,7 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
exec_result.elapsed_time))
return parse_result
-def add_common_opts(parser):
+def add_common_opts(parser) -> None:
parser.add_argument('--build_dir',
help='As in the make command, it specifies the build '
'directory.',
@@ -183,13 +183,13 @@ def add_common_opts(parser):
help='Run all KUnit tests through allyesconfig',
action='store_true')
-def add_build_opts(parser):
+def add_build_opts(parser) -> None:
parser.add_argument('--jobs',
help='As in the make command, "Specifies the number of '
'jobs (commands) to run simultaneously."',
type=int, default=8, metavar='jobs')
-def add_exec_opts(parser):
+def add_exec_opts(parser) -> None:
parser.add_argument('--timeout',
help='maximum number of seconds to allow for all tests '
'to run. This does not include time taken to build the '
@@ -198,7 +198,7 @@ def add_exec_opts(parser):
default=300,
metavar='timeout')
-def add_parse_opts(parser):
+def add_parse_opts(parser) -> None:
parser.add_argument('--raw_output', help='don\'t format output from kernel',
action='store_true')
parser.add_argument('--json',
@@ -256,10 +256,7 @@ def main(argv, linux=None):
os.mkdir(cli_args.build_dir)
if not linux:
- linux = kunit_kernel.LinuxSourceTree()
-
- linux.create_kunitconfig(cli_args.build_dir)
- linux.read_kunitconfig(cli_args.build_dir)
+ linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
request = KunitRequest(cli_args.raw_output,
cli_args.timeout,
@@ -277,10 +274,7 @@ def main(argv, linux=None):
os.mkdir(cli_args.build_dir)
if not linux:
- linux = kunit_kernel.LinuxSourceTree()
-
- linux.create_kunitconfig(cli_args.build_dir)
- linux.read_kunitconfig(cli_args.build_dir)
+ linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
request = KunitConfigRequest(cli_args.build_dir,
cli_args.make_options)
@@ -292,10 +286,7 @@ def main(argv, linux=None):
sys.exit(1)
elif cli_args.subcommand == 'build':
if not linux:
- linux = kunit_kernel.LinuxSourceTree()
-
- linux.create_kunitconfig(cli_args.build_dir)
- linux.read_kunitconfig(cli_args.build_dir)
+ linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
request = KunitBuildRequest(cli_args.jobs,
cli_args.build_dir,
@@ -309,10 +300,7 @@ def main(argv, linux=None):
sys.exit(1)
elif cli_args.subcommand == 'exec':
if not linux:
- linux = kunit_kernel.LinuxSourceTree()
-
- linux.create_kunitconfig(cli_args.build_dir)
- linux.read_kunitconfig(cli_args.build_dir)
+ linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
exec_request = KunitExecRequest(cli_args.timeout,
cli_args.build_dir,
diff --git a/tools/testing/kunit/kunit_config.py b/tools/testing/kunit/kunit_config.py
index 02ffc3a3e5dc..bdd60230764b 100644
--- a/tools/testing/kunit/kunit_config.py
+++ b/tools/testing/kunit/kunit_config.py
@@ -8,6 +8,7 @@
import collections
import re
+from typing import List, Set
CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
@@ -30,10 +31,10 @@ class KconfigParseError(Exception):
class Kconfig(object):
"""Represents defconfig or .config specified using the Kconfig language."""
- def __init__(self):
- self._entries = []
+ def __init__(self) -> None:
+ self._entries = [] # type: List[KconfigEntry]
- def entries(self):
+ def entries(self) -> Set[KconfigEntry]:
return set(self._entries)
def add_entry(self, entry: KconfigEntry) -> None:
diff --git a/tools/testing/kunit/kunit_json.py b/tools/testing/kunit/kunit_json.py
index 624b31b2dbd6..f5cca5c38cac 100644
--- a/tools/testing/kunit/kunit_json.py
+++ b/tools/testing/kunit/kunit_json.py
@@ -13,7 +13,7 @@ import kunit_parser
from kunit_parser import TestStatus
-def get_json_result(test_result, def_config, build_dir, json_path):
+def get_json_result(test_result, def_config, build_dir, json_path) -> str:
sub_groups = []
# Each test suite is mapped to a KernelCI sub_group
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 57c1724b7e5d..2076a5a2d060 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -11,6 +11,7 @@ import subprocess
import os
import shutil
import signal
+from typing import Iterator
from contextlib import ExitStack
@@ -39,7 +40,7 @@ class BuildError(Exception):
class LinuxSourceTreeOperations(object):
"""An abstraction over command line operations performed on a source tree."""
- def make_mrproper(self):
+ def make_mrproper(self) -> None:
try:
subprocess.check_output(['make', 'mrproper'], stderr=subprocess.STDOUT)
except OSError as e:
@@ -47,7 +48,7 @@ class LinuxSourceTreeOperations(object):
except subprocess.CalledProcessError as e:
raise ConfigError(e.output.decode())
- def make_olddefconfig(self, build_dir, make_options):
+ def make_olddefconfig(self, build_dir, make_options) -> None:
command = ['make', 'ARCH=um', 'olddefconfig']
if make_options:
command.extend(make_options)
@@ -60,7 +61,7 @@ class LinuxSourceTreeOperations(object):
except subprocess.CalledProcessError as e:
raise ConfigError(e.output.decode())
- def make_allyesconfig(self, build_dir, make_options):
+ def make_allyesconfig(self, build_dir, make_options) -> None:
kunit_parser.print_with_timestamp(
'Enabling all CONFIGs for UML...')
command = ['make', 'ARCH=um', 'allyesconfig']
@@ -82,7 +83,7 @@ class LinuxSourceTreeOperations(object):
kunit_parser.print_with_timestamp(
'Starting Kernel with all configs takes a few minutes...')
- def make(self, jobs, build_dir, make_options):
+ def make(self, jobs, build_dir, make_options) -> None:
command = ['make', 'ARCH=um', '--jobs=' + str(jobs)]
if make_options:
command.extend(make_options)
@@ -100,7 +101,7 @@ class LinuxSourceTreeOperations(object):
if stderr: # likely only due to build warnings
print(stderr.decode())
- def linux_bin(self, params, timeout, build_dir):
+ def linux_bin(self, params, timeout, build_dir) -> None:
"""Runs the Linux UML binary. Must be named 'linux'."""
linux_bin = get_file_path(build_dir, 'linux')
outfile = get_outfile_path(build_dir)
@@ -110,41 +111,42 @@ class LinuxSourceTreeOperations(object):
stderr=subprocess.STDOUT)
process.wait(timeout)
-def get_kconfig_path(build_dir):
+def get_kconfig_path(build_dir) -> str:
return get_file_path(build_dir, KCONFIG_PATH)
-def get_kunitconfig_path(build_dir):
+def get_kunitconfig_path(build_dir) -> str:
return get_file_path(build_dir, KUNITCONFIG_PATH)
-def get_outfile_path(build_dir):
+def get_outfile_path(build_dir) -> str:
return get_file_path(build_dir, OUTFILE_PATH)
class LinuxSourceTree(object):
"""Represents a Linux kernel source tree with KUnit tests."""
- def __init__(self):
- self._ops = LinuxSourceTreeOperations()
+ def __init__(self, build_dir: str, load_config=True, defconfig=DEFAULT_KUNITCONFIG_PATH) -> None:
signal.signal(signal.SIGINT, self.signal_handler)
- def clean(self):
- try:
- self._ops.make_mrproper()
- except ConfigError as e:
- logging.error(e)
- return False
- return True
+ self._ops = LinuxSourceTreeOperations()
+
+ if not load_config:
+ return
- def create_kunitconfig(self, build_dir, defconfig=DEFAULT_KUNITCONFIG_PATH):
kunitconfig_path = get_kunitconfig_path(build_dir)
if not os.path.exists(kunitconfig_path):
shutil.copyfile(defconfig, kunitconfig_path)
- def read_kunitconfig(self, build_dir):
- kunitconfig_path = get_kunitconfig_path(build_dir)
self._kconfig = kunit_config.Kconfig()
self._kconfig.read_from_file(kunitconfig_path)
- def validate_config(self, build_dir):
+ def clean(self) -> bool:
+ try:
+ self._ops.make_mrproper()
+ except ConfigError as e:
+ logging.error(e)
+ return False
+ return True
+
+ def validate_config(self, build_dir) -> bool:
kconfig_path = get_kconfig_path(build_dir)
validated_kconfig = kunit_config.Kconfig()
validated_kconfig.read_from_file(kconfig_path)
@@ -158,7 +160,7 @@ class LinuxSourceTree(object):
return False
return True
- def build_config(self, build_dir, make_options):
+ def build_config(self, build_dir, make_options) -> bool:
kconfig_path = get_kconfig_path(build_dir)
if build_dir and not os.path.exists(build_dir):
os.mkdir(build_dir)
@@ -170,7 +172,7 @@ class LinuxSourceTree(object):
return False
return self.validate_config(build_dir)
- def build_reconfig(self, build_dir, make_options):
+ def build_reconfig(self, build_dir, make_options) -> bool:
"""Creates a new .config if it is not a subset of the .kunitconfig."""
kconfig_path = get_kconfig_path(build_dir)
if os.path.exists(kconfig_path):
@@ -186,7 +188,7 @@ class LinuxSourceTree(object):
print('Generating .config ...')
return self.build_config(build_dir, make_options)
- def build_um_kernel(self, alltests, jobs, build_dir, make_options):
+ def build_um_kernel(self, alltests, jobs, build_dir, make_options) -> bool:
try:
if alltests:
self._ops.make_allyesconfig(build_dir, make_options)
@@ -197,8 +199,8 @@ class LinuxSourceTree(object):
return False
return self.validate_config(build_dir)
- def run_kernel(self, args=[], build_dir='', timeout=None):
- args.extend(['mem=1G'])
+ def run_kernel(self, args=[], build_dir='', timeout=None) -> Iterator[str]:
+ args.extend(['mem=1G', 'console=tty'])
self._ops.linux_bin(args, timeout, build_dir)
outfile = get_outfile_path(build_dir)
subprocess.call(['stty', 'sane'])
@@ -206,6 +208,6 @@ class LinuxSourceTree(object):
for line in file:
yield line
- def signal_handler(self, sig, frame):
+ def signal_handler(self, sig, frame) -> None:
logging.error('Build interruption occurred. Cleaning console.')
subprocess.call(['stty', 'sane'])
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 6614ec4d0898..e8bcc139702e 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -12,32 +12,32 @@ from collections import namedtuple
from datetime import datetime
from enum import Enum, auto
from functools import reduce
-from typing import List, Optional, Tuple
+from typing import Iterable, Iterator, List, Optional, Tuple
TestResult = namedtuple('TestResult', ['status','suites','log'])
class TestSuite(object):
- def __init__(self):
- self.status = None
- self.name = None
- self.cases = []
+ def __init__(self) -> None:
+ self.status = TestStatus.SUCCESS
+ self.name = ''
+ self.cases = [] # type: List[TestCase]
- def __str__(self):
- return 'TestSuite(' + self.status + ',' + self.name + ',' + str(self.cases) + ')'
+ def __str__(self) -> str:
+ return 'TestSuite(' + str(self.status) + ',' + self.name + ',' + str(self.cases) + ')'
- def __repr__(self):
+ def __repr__(self) -> str:
return str(self)
class TestCase(object):
- def __init__(self):
- self.status = None
+ def __init__(self) -> None:
+ self.status = TestStatus.SUCCESS
self.name = ''
- self.log = []
+ self.log = [] # type: List[str]
- def __str__(self):
- return 'TestCase(' + self.status + ',' + self.name + ',' + str(self.log) + ')'
+ def __str__(self) -> str:
+ return 'TestCase(' + str(self.status) + ',' + self.name + ',' + str(self.log) + ')'
- def __repr__(self):
+ def __repr__(self) -> str:
return str(self)
class TestStatus(Enum):
@@ -51,7 +51,7 @@ kunit_start_re = re.compile(r'TAP version [0-9]+$')
kunit_end_re = re.compile('(List of all partitions:|'
'Kernel panic - not syncing: VFS:)')
-def isolate_kunit_output(kernel_output):
+def isolate_kunit_output(kernel_output) -> Iterator[str]:
started = False
for line in kernel_output:
line = line.rstrip() # line always has a trailing \n
@@ -64,7 +64,7 @@ def isolate_kunit_output(kernel_output):
elif started:
yield line[prefix_len:] if prefix_len > 0 else line
-def raw_output(kernel_output):
+def raw_output(kernel_output) -> None:
for line in kernel_output:
print(line.rstrip())
@@ -72,36 +72,36 @@ DIVIDER = '=' * 60
RESET = '\033[0;0m'
-def red(text):
+def red(text) -> str:
return '\033[1;31m' + text + RESET
-def yellow(text):
+def yellow(text) -> str:
return '\033[1;33m' + text + RESET
-def green(text):
+def green(text) -> str:
return '\033[1;32m' + text + RESET
-def print_with_timestamp(message):
+def print_with_timestamp(message) -> None:
print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
-def format_suite_divider(message):
+def format_suite_divider(message) -> str:
return '======== ' + message + ' ========'
-def print_suite_divider(message):
+def print_suite_divider(message) -> None:
print_with_timestamp(DIVIDER)
print_with_timestamp(format_suite_divider(message))
-def print_log(log):
+def print_log(log) -> None:
for m in log:
print_with_timestamp(m)
TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*#).*$')
-def consume_non_diagnositic(lines: List[str]) -> None:
+def consume_non_diagnostic(lines: List[str]) -> None:
while lines and not TAP_ENTRIES.match(lines[0]):
lines.pop(0)
-def save_non_diagnositic(lines: List[str], test_case: TestCase) -> None:
+def save_non_diagnostic(lines: List[str], test_case: TestCase) -> None:
while lines and not TAP_ENTRIES.match(lines[0]):
test_case.log.append(lines[0])
lines.pop(0)
@@ -113,7 +113,7 @@ OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool:
- save_non_diagnositic(lines, test_case)
+ save_non_diagnostic(lines, test_case)
if not lines:
test_case.status = TestStatus.TEST_CRASHED
return True
@@ -139,7 +139,7 @@ SUBTEST_DIAGNOSTIC = re.compile(r'^[\s]+# (.*)$')
DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^[\s]+# .*?: kunit test case crashed!$')
def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool:
- save_non_diagnositic(lines, test_case)
+ save_non_diagnostic(lines, test_case)
if not lines:
return False
line = lines[0]
@@ -155,7 +155,7 @@ def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool:
def parse_test_case(lines: List[str]) -> Optional[TestCase]:
test_case = TestCase()
- save_non_diagnositic(lines, test_case)
+ save_non_diagnostic(lines, test_case)
while parse_diagnostic(lines, test_case):
pass
if parse_ok_not_ok_test_case(lines, test_case):
@@ -166,7 +166,7 @@ def parse_test_case(lines: List[str]) -> Optional[TestCase]:
SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$')
def parse_subtest_header(lines: List[str]) -> Optional[str]:
- consume_non_diagnositic(lines)
+ consume_non_diagnostic(lines)
if not lines:
return None
match = SUBTEST_HEADER.match(lines[0])
@@ -179,7 +179,7 @@ def parse_subtest_header(lines: List[str]) -> Optional[str]:
SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)')
def parse_subtest_plan(lines: List[str]) -> Optional[int]:
- consume_non_diagnositic(lines)
+ consume_non_diagnostic(lines)
match = SUBTEST_PLAN.match(lines[0])
if match:
lines.pop(0)
@@ -202,7 +202,7 @@ def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
def parse_ok_not_ok_test_suite(lines: List[str],
test_suite: TestSuite,
expected_suite_index: int) -> bool:
- consume_non_diagnositic(lines)
+ consume_non_diagnostic(lines)
if not lines:
test_suite.status = TestStatus.TEST_CRASHED
return False
@@ -224,18 +224,17 @@ def parse_ok_not_ok_test_suite(lines: List[str],
else:
return False
-def bubble_up_errors(to_status, status_container_list) -> TestStatus:
- status_list = map(to_status, status_container_list)
- return reduce(max_status, status_list, TestStatus.SUCCESS)
+def bubble_up_errors(statuses: Iterable[TestStatus]) -> TestStatus:
+ return reduce(max_status, statuses, TestStatus.SUCCESS)
def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
- max_test_case_status = bubble_up_errors(lambda x: x.status, test_suite.cases)
+ max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
return max_status(max_test_case_status, test_suite.status)
def parse_test_suite(lines: List[str], expected_suite_index: int) -> Optional[TestSuite]:
if not lines:
return None
- consume_non_diagnositic(lines)
+ consume_non_diagnostic(lines)
test_suite = TestSuite()
test_suite.status = TestStatus.SUCCESS
name = parse_subtest_header(lines)
@@ -264,7 +263,7 @@ def parse_test_suite(lines: List[str], expected_suite_index: int) -> Optional[Te
TAP_HEADER = re.compile(r'^TAP version 14$')
def parse_tap_header(lines: List[str]) -> bool:
- consume_non_diagnositic(lines)
+ consume_non_diagnostic(lines)
if TAP_HEADER.match(lines[0]):
lines.pop(0)
return True
@@ -274,7 +273,7 @@ def parse_tap_header(lines: List[str]) -> bool:
TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)')
def parse_test_plan(lines: List[str]) -> Optional[int]:
- consume_non_diagnositic(lines)
+ consume_non_diagnostic(lines)
match = TEST_PLAN.match(lines[0])
if match:
lines.pop(0)
@@ -282,11 +281,11 @@ def parse_test_plan(lines: List[str]) -> Optional[int]:
else:
return None
-def bubble_up_suite_errors(test_suite_list: List[TestSuite]) -> TestStatus:
- return bubble_up_errors(lambda x: x.status, test_suite_list)
+def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus:
+ return bubble_up_errors(x.status for x in test_suites)
def parse_test_result(lines: List[str]) -> TestResult:
- consume_non_diagnositic(lines)
+ consume_non_diagnostic(lines)
if not lines or not parse_tap_header(lines):
return TestResult(TestStatus.NO_TESTS, [], lines)
expected_test_suite_num = parse_test_plan(lines)
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index afbab4aeef3c..8a917cb4426a 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -77,8 +77,10 @@ TARGETS += zram
TARGETS_HOTPLUG = cpu-hotplug
TARGETS_HOTPLUG += memory-hotplug
-# User can optionally provide a TARGETS skiplist.
-SKIP_TARGETS ?=
+# User can optionally provide a TARGETS skiplist. By default we skip
+# BPF since it has cutting-edge build-time dependencies which require
+# more effort to install.
+SKIP_TARGETS ?= bpf
ifneq ($(SKIP_TARGETS),)
TMP := $(filter-out $(SKIP_TARGETS), $(TARGETS))
override TARGETS := $(TMP)
diff --git a/tools/testing/selftests/arm64/fp/fpsimd-test.S b/tools/testing/selftests/arm64/fp/fpsimd-test.S
index 1c5556bdd11d..0dbd594c2747 100644
--- a/tools/testing/selftests/arm64/fp/fpsimd-test.S
+++ b/tools/testing/selftests/arm64/fp/fpsimd-test.S
@@ -457,7 +457,7 @@ function barf
mov x11, x1 // actual data
mov x12, x2 // data size
- puts "Mistatch: PID="
+ puts "Mismatch: PID="
mov x0, x20
bl putdec
puts ", iteration="
diff --git a/tools/testing/selftests/arm64/fp/sve-test.S b/tools/testing/selftests/arm64/fp/sve-test.S
index f95074c9b48b..9210691aa998 100644
--- a/tools/testing/selftests/arm64/fp/sve-test.S
+++ b/tools/testing/selftests/arm64/fp/sve-test.S
@@ -625,7 +625,7 @@ function barf
mov x11, x1 // actual data
mov x12, x2 // data size
- puts "Mistatch: PID="
+ puts "Mismatch: PID="
mov x0, x20
bl putdec
puts ", iteration="
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 8c33e999319a..c51df6b91bef 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -121,6 +121,9 @@ VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
/sys/kernel/btf/vmlinux \
/boot/vmlinux-$(shell uname -r)
VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+ifeq ($(VMLINUX_BTF),)
+$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
+endif
# Define simple and short `make test_progs`, `make test_sysctl`, etc targets
# to build individual tests.
diff --git a/tools/testing/selftests/bpf/prog_tests/test_local_storage.c b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c
index c0fe73a17ed1..3bfcf00c0a67 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_local_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c
@@ -34,61 +34,6 @@ struct storage {
struct bpf_spin_lock lock;
};
-/* Copies an rm binary to a temp file. dest is a mkstemp template */
-static int copy_rm(char *dest)
-{
- int fd_in, fd_out = -1, ret = 0;
- struct stat stat;
- char *buf = NULL;
-
- fd_in = open("/bin/rm", O_RDONLY);
- if (fd_in < 0)
- return -errno;
-
- fd_out = mkstemp(dest);
- if (fd_out < 0) {
- ret = -errno;
- goto out;
- }
-
- ret = fstat(fd_in, &stat);
- if (ret == -1) {
- ret = -errno;
- goto out;
- }
-
- buf = malloc(stat.st_blksize);
- if (!buf) {
- ret = -errno;
- goto out;
- }
-
- while (ret = read(fd_in, buf, stat.st_blksize), ret > 0) {
- ret = write(fd_out, buf, ret);
- if (ret < 0) {
- ret = -errno;
- goto out;
-
- }
- }
- if (ret < 0) {
- ret = -errno;
- goto out;
-
- }
-
- /* Set executable permission on the copied file */
- ret = chmod(dest, 0100);
- if (ret == -1)
- ret = -errno;
-
-out:
- free(buf);
- close(fd_in);
- close(fd_out);
- return ret;
-}
-
/* Fork and exec the provided rm binary and return the exit code of the
* forked process and its pid.
*/
@@ -168,9 +113,11 @@ static bool check_syscall_operations(int map_fd, int obj_fd)
void test_test_local_storage(void)
{
- char tmp_exec_path[PATH_MAX] = "/tmp/copy_of_rmXXXXXX";
+ char tmp_dir_path[64] = "/tmp/local_storageXXXXXX";
int err, serv_sk = -1, task_fd = -1, rm_fd = -1;
struct local_storage *skel = NULL;
+ char tmp_exec_path[64];
+ char cmd[256];
skel = local_storage__open_and_load();
if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
@@ -189,18 +136,24 @@ void test_test_local_storage(void)
task_fd))
goto close_prog;
- err = copy_rm(tmp_exec_path);
- if (CHECK(err < 0, "copy_rm", "err %d errno %d\n", err, errno))
+ if (CHECK(!mkdtemp(tmp_dir_path), "mkdtemp",
+ "unable to create tmpdir: %d\n", errno))
goto close_prog;
+ snprintf(tmp_exec_path, sizeof(tmp_exec_path), "%s/copy_of_rm",
+ tmp_dir_path);
+ snprintf(cmd, sizeof(cmd), "cp /bin/rm %s", tmp_exec_path);
+ if (CHECK_FAIL(system(cmd)))
+ goto close_prog_rmdir;
+
rm_fd = open(tmp_exec_path, O_RDONLY);
if (CHECK(rm_fd < 0, "open", "failed to open %s err:%d, errno:%d",
tmp_exec_path, rm_fd, errno))
- goto close_prog;
+ goto close_prog_rmdir;
if (!check_syscall_operations(bpf_map__fd(skel->maps.inode_storage_map),
rm_fd))
- goto close_prog;
+ goto close_prog_rmdir;
/* Sets skel->bss->monitored_pid to the pid of the forked child
* forks a child process that executes tmp_exec_path and tries to
@@ -209,33 +162,36 @@ void test_test_local_storage(void)
*/
err = run_self_unlink(&skel->bss->monitored_pid, tmp_exec_path);
if (CHECK(err != EPERM, "run_self_unlink", "err %d want EPERM\n", err))
- goto close_prog_unlink;
+ goto close_prog_rmdir;
/* Set the process being monitored to be the current process */
skel->bss->monitored_pid = getpid();
- /* Remove the temporary created executable */
- err = unlink(tmp_exec_path);
- if (CHECK(err != 0, "unlink", "unable to unlink %s: %d", tmp_exec_path,
- errno))
- goto close_prog_unlink;
+ /* Move copy_of_rm to a new location so that it triggers the
+ * inode_rename LSM hook with a new_dentry that has a NULL inode ptr.
+ */
+ snprintf(cmd, sizeof(cmd), "mv %s/copy_of_rm %s/check_null_ptr",
+ tmp_dir_path, tmp_dir_path);
+ if (CHECK_FAIL(system(cmd)))
+ goto close_prog_rmdir;
CHECK(skel->data->inode_storage_result != 0, "inode_storage_result",
"inode_local_storage not set\n");
serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
if (CHECK(serv_sk < 0, "start_server", "failed to start server\n"))
- goto close_prog;
+ goto close_prog_rmdir;
CHECK(skel->data->sk_storage_result != 0, "sk_storage_result",
"sk_local_storage not set\n");
if (!check_syscall_operations(bpf_map__fd(skel->maps.sk_storage_map),
serv_sk))
- goto close_prog;
+ goto close_prog_rmdir;
-close_prog_unlink:
- unlink(tmp_exec_path);
+close_prog_rmdir:
+ snprintf(cmd, sizeof(cmd), "rm -rf %s", tmp_dir_path);
+ system(cmd);
close_prog:
close(serv_sk);
close(rm_fd);
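
The rewritten test above drops the hand-rolled copy_rm() in favour of a scratch directory plus shell commands, and adds a rename step so the new inode_rename LSM hook is exercised with a destination name that does not exist yet. A stripped-down sketch of just that userspace flow, with error handling reduced to early returns:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char tmp_dir[] = "/tmp/local_storageXXXXXX";
	char cmd[256];

	if (!mkdtemp(tmp_dir))
		return 1;

	snprintf(cmd, sizeof(cmd), "cp /bin/rm %s/copy_of_rm", tmp_dir);
	if (system(cmd))
		return 1;

	/* Renaming onto a name that does not exist yet is what makes the
	 * kernel-side inode_rename hook see a NULL new inode. */
	snprintf(cmd, sizeof(cmd), "mv %s/copy_of_rm %s/check_null_ptr",
		 tmp_dir, tmp_dir);
	if (system(cmd))
		return 1;

	snprintf(cmd, sizeof(cmd), "rm -rf %s", tmp_dir);
	return system(cmd) ? 1 : 0;
}
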
diff --git a/tools/testing/selftests/bpf/progs/bprm_opts.c b/tools/testing/selftests/bpf/progs/bprm_opts.c
index 5bfef2887e70..418d9c6d4952 100644
--- a/tools/testing/selftests/bpf/progs/bprm_opts.c
+++ b/tools/testing/selftests/bpf/progs/bprm_opts.c
@@ -4,7 +4,7 @@
* Copyright 2020 Google LLC.
*/
-#include "vmlinux.h"
+#include <linux/bpf.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c
index 3e3de130f28f..95868bc7ada9 100644
--- a/tools/testing/selftests/bpf/progs/local_storage.c
+++ b/tools/testing/selftests/bpf/progs/local_storage.c
@@ -50,7 +50,6 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
__u32 pid = bpf_get_current_pid_tgid() >> 32;
struct local_storage *storage;
bool is_self_unlink;
- int err;
if (pid != monitored_pid)
return 0;
@@ -66,8 +65,27 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
return -EPERM;
}
- storage = bpf_inode_storage_get(&inode_storage_map, victim->d_inode, 0,
- BPF_LOCAL_STORAGE_GET_F_CREATE);
+ return 0;
+}
+
+SEC("lsm/inode_rename")
+int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
+ struct local_storage *storage;
+ int err;
+
+ /* new_dentry->d_inode can be NULL when the inode is renamed to a file
+ * that did not exist before. The helper should be able to handle this
+ * NULL pointer.
+ */
+ bpf_inode_storage_get(&inode_storage_map, new_dentry->d_inode, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+
+ storage = bpf_inode_storage_get(&inode_storage_map, old_dentry->d_inode,
+ 0, 0);
if (!storage)
return 0;
@@ -76,7 +94,7 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
inode_storage_result = -1;
bpf_spin_unlock(&storage->lock);
- err = bpf_inode_storage_delete(&inode_storage_map, victim->d_inode);
+ err = bpf_inode_storage_delete(&inode_storage_map, old_dentry->d_inode);
if (!err)
inode_storage_result = err;
@@ -133,37 +151,18 @@ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
return 0;
}
-SEC("lsm/file_open")
-int BPF_PROG(file_open, struct file *file)
-{
- __u32 pid = bpf_get_current_pid_tgid() >> 32;
- struct local_storage *storage;
-
- if (pid != monitored_pid)
- return 0;
-
- if (!file->f_inode)
- return 0;
-
- storage = bpf_inode_storage_get(&inode_storage_map, file->f_inode, 0,
- BPF_LOCAL_STORAGE_GET_F_CREATE);
- if (!storage)
- return 0;
-
- bpf_spin_lock(&storage->lock);
- storage->value = DUMMY_STORAGE_VALUE;
- bpf_spin_unlock(&storage->lock);
- return 0;
-}
-
/* This uses the local storage to remember the inode of the binary that a
* process was originally executing.
*/
SEC("lsm/bprm_committed_creds")
void BPF_PROG(exec, struct linux_binprm *bprm)
{
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
struct local_storage *storage;
+ if (pid != monitored_pid)
+ return;
+
storage = bpf_task_storage_get(&task_storage_map,
bpf_get_current_task_btf(), 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
@@ -172,4 +171,13 @@ void BPF_PROG(exec, struct linux_binprm *bprm)
storage->exec_inode = bprm->file->f_inode;
bpf_spin_unlock(&storage->lock);
}
+
+ storage = bpf_inode_storage_get(&inode_storage_map, bprm->file->f_inode,
+ 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return;
+
+ bpf_spin_lock(&storage->lock);
+ storage->value = DUMMY_STORAGE_VALUE;
+ bpf_spin_unlock(&storage->lock);
}
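
The BPF program above now attaches to inode_rename, mainly to prove that bpf_inode_storage_get() copes with new_dentry->d_inode being NULL. A trimmed sketch of such a hook, assuming the usual vmlinux.h plus libbpf helper headers these selftests build against (map value type and names are illustrative):

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_INODE_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, __u64);
} inode_map SEC(".maps");

SEC("lsm/inode_rename")
int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
	     struct inode *new_dir, struct dentry *new_dentry,
	     unsigned int flags)
{
	/* new_dentry->d_inode is NULL when renaming onto a name that does not
	 * exist yet; the helper is expected to handle that, which is exactly
	 * what the selftest above checks. */
	bpf_inode_storage_get(&inode_map, new_dentry->d_inode, 0,
			      BPF_LOCAL_STORAGE_GET_F_CREATE);
	return 0;
}
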
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 0ad3e6305ff0..51adc42b2b40 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -1312,22 +1312,58 @@ static void test_map_stress(void)
#define DO_UPDATE 1
#define DO_DELETE 0
+#define MAP_RETRIES 20
+
+static int map_update_retriable(int map_fd, const void *key, const void *value,
+ int flags, int attempts)
+{
+ while (bpf_map_update_elem(map_fd, key, value, flags)) {
+ if (!attempts || (errno != EAGAIN && errno != EBUSY))
+ return -errno;
+
+ usleep(1);
+ attempts--;
+ }
+
+ return 0;
+}
+
+static int map_delete_retriable(int map_fd, const void *key, int attempts)
+{
+ while (bpf_map_delete_elem(map_fd, key)) {
+ if (!attempts || (errno != EAGAIN && errno != EBUSY))
+ return -errno;
+
+ usleep(1);
+ attempts--;
+ }
+
+ return 0;
+}
+
static void test_update_delete(unsigned int fn, void *data)
{
int do_update = ((int *)data)[1];
int fd = ((int *)data)[0];
- int i, key, value;
+ int i, key, value, err;
for (i = fn; i < MAP_SIZE; i += TASKS) {
key = value = i;
if (do_update) {
- assert(bpf_map_update_elem(fd, &key, &value,
- BPF_NOEXIST) == 0);
- assert(bpf_map_update_elem(fd, &key, &value,
- BPF_EXIST) == 0);
+ err = map_update_retriable(fd, &key, &value, BPF_NOEXIST, MAP_RETRIES);
+ if (err)
+ printf("error %d %d\n", err, errno);
+ assert(err == 0);
+ err = map_update_retriable(fd, &key, &value, BPF_EXIST, MAP_RETRIES);
+ if (err)
+ printf("error %d %d\n", err, errno);
+ assert(err == 0);
} else {
- assert(bpf_map_delete_elem(fd, &key) == 0);
+ err = map_delete_retriable(fd, &key, MAP_RETRIES);
+ if (err)
+ printf("error %d %d\n", err, errno);
+ assert(err == 0);
}
}
}
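
map_update_retriable()/map_delete_retriable() above exist because, under heavy parallel stress, map operations can transiently fail with EAGAIN or EBUSY rather than a hard error. The shape of that retry logic in isolation, with a deliberately flaky stand-in operation instead of a real BPF map:

#include <errno.h>
#include <unistd.h>

/* Retry an operation that fails with EAGAIN/EBUSY (momentary contention),
 * give up immediately on any other errno. op() returning 0 means success. */
static int retry_on_busy(int (*op)(void *ctx), void *ctx, int attempts)
{
	while (op(ctx)) {
		if (!attempts || (errno != EAGAIN && errno != EBUSY))
			return -errno;
		usleep(1);
		attempts--;
	}
	return 0;
}

static int attempts_left = 2;
static int flaky_op(void *ctx)
{
	(void)ctx;
	if (attempts_left-- > 0) {
		errno = EAGAIN;	/* fail twice, then succeed */
		return -1;
	}
	return 0;
}

int main(void)
{
	return retry_on_busy(flaky_op, NULL, 20) ? 1 : 0;
}
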
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 777a81404fdb..f8569f04064b 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -50,7 +50,7 @@
#define MAX_INSNS BPF_MAXINSNS
#define MAX_TEST_INSNS 1000000
#define MAX_FIXUPS 8
-#define MAX_NR_MAPS 20
+#define MAX_NR_MAPS 21
#define MAX_TEST_RUNS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
@@ -87,6 +87,7 @@ struct bpf_test {
int fixup_sk_storage_map[MAX_FIXUPS];
int fixup_map_event_output[MAX_FIXUPS];
int fixup_map_reuseport_array[MAX_FIXUPS];
+ int fixup_map_ringbuf[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
uint32_t insn_processed;
@@ -640,6 +641,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
int *fixup_sk_storage_map = test->fixup_sk_storage_map;
int *fixup_map_event_output = test->fixup_map_event_output;
int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
+ int *fixup_map_ringbuf = test->fixup_map_ringbuf;
if (test->fill_helper) {
test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
@@ -817,6 +819,14 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
fixup_map_reuseport_array++;
} while (*fixup_map_reuseport_array);
}
+ if (*fixup_map_ringbuf) {
+ map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
+ 0, 4096);
+ do {
+ prog[*fixup_map_ringbuf].imm = map_fds[20];
+ fixup_map_ringbuf++;
+ } while (*fixup_map_ringbuf);
+ }
}
struct libcap {
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
index 45d43bf82f26..0b943897aaf6 100644
--- a/tools/testing/selftests/bpf/verifier/spill_fill.c
+++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
@@ -29,6 +29,36 @@
.result_unpriv = ACCEPT,
},
{
+ "check valid spill/fill, ptr to mem",
+ .insns = {
+ /* reserve 8 byte ringbuf memory */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
+ /* store a pointer to the reserved memory in R6 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ /* check whether the reservation was successful */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ /* spill R6(mem) into the stack */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ /* fill it back in R7 */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
+ /* should be able to access *(R7) = 0 */
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
+ /* submit the reserved ringbuf memory */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_ringbuf = { 1 },
+ .result = ACCEPT,
+ .result_unpriv = ACCEPT,
+},
+{
"check corrupted spill/fill",
.insns = {
/* spill R1(ctx) into stack */
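
The new "check valid spill/fill, ptr to mem" test encodes, in raw instructions, that a pointer returned by bpf_ringbuf_reserve() keeps its mem type across a stack spill and fill. Roughly the same program written as BPF C, to show what the insn sequence corresponds to (program type, section name, and map layout are assumptions, not taken from the test):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} rb SEC(".maps");

SEC("socket")
int spill_fill_ringbuf(struct __sk_buff *skb)
{
	/* reserve 8 bytes of ringbuf memory */
	__u64 *p = bpf_ringbuf_reserve(&rb, 8, 0);

	if (!p)
		return 0;
	*p = 0;			/* access through the (spilled/filled) pointer */
	bpf_ringbuf_submit(p, 0);
	return 0;
}
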
diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xdpxceiver.c
index 014dedaa4dd2..1e722ee76b1f 100644
--- a/tools/testing/selftests/bpf/xdpxceiver.c
+++ b/tools/testing/selftests/bpf/xdpxceiver.c
@@ -715,7 +715,7 @@ static void worker_pkt_dump(void)
int payload = *((uint32_t *)(pkt_buf[iter]->payload + PKT_HDR_SIZE));
if (payload == EOT) {
- ksft_print_msg("End-of-tranmission frame received\n");
+ ksft_print_msg("End-of-transmission frame received\n");
fprintf(stdout, "---------------------------------------\n");
break;
}
@@ -747,7 +747,7 @@ static void worker_pkt_validate(void)
}
if (payloadseqnum == EOT) {
- ksft_print_msg("End-of-tranmission frame received: PASS\n");
+ ksft_print_msg("End-of-transmission frame received: PASS\n");
sigvar = 1;
break;
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
index 4d900bc1f76c..5c7700212f75 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
@@ -230,7 +230,7 @@ switch_create()
__mlnx_qos -i $swp4 --pfc=0,1,0,0,0,0,0,0 >/dev/null
# PG0 will get autoconfigured to Xoff, give PG1 arbitrarily 100K, which
# is (-2*MTU) about 80K of delay provision.
- __mlnx_qos -i $swp3 --buffer_size=0,$_100KB,0,0,0,0,0,0 >/dev/null
+ __mlnx_qos -i $swp4 --buffer_size=0,$_100KB,0,0,0,0,0,0 >/dev/null
# bridges
# -------
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index c7ca4faba272..fe41c6a0fa67 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -33,7 +33,7 @@ ifeq ($(ARCH),s390)
UNAME_M := s390x
endif
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c
+LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index 3d96a7bfaff3..cdad1eca72f7 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -7,23 +7,20 @@
* Copyright (C) 2019, Google, Inc.
*/
-#define _GNU_SOURCE /* for program_invocation_name */
+#define _GNU_SOURCE /* for pipe2 */
#include <stdio.h>
#include <stdlib.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-#include <asm/unistd.h>
#include <time.h>
#include <poll.h>
#include <pthread.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
#include <linux/userfaultfd.h>
+#include <sys/syscall.h>
-#include "perf_test_util.h"
-#include "processor.h"
+#include "kvm_util.h"
#include "test_util.h"
+#include "perf_test_util.h"
+#include "guest_modes.h"
#ifdef __NR_userfaultfd
@@ -39,12 +36,14 @@
#define PER_VCPU_DEBUG(...) _no_printf(__VA_ARGS__)
#endif
+static int nr_vcpus = 1;
+static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static char *guest_data_prototype;
static void *vcpu_worker(void *data)
{
int ret;
- struct vcpu_args *vcpu_args = (struct vcpu_args *)data;
+ struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data;
int vcpu_id = vcpu_args->vcpu_id;
struct kvm_vm *vm = perf_test_args.vm;
struct kvm_run *run;
@@ -248,9 +247,14 @@ static int setup_demand_paging(struct kvm_vm *vm,
return 0;
}
-static void run_test(enum vm_guest_mode mode, bool use_uffd,
- useconds_t uffd_delay)
+struct test_params {
+ bool use_uffd;
+ useconds_t uffd_delay;
+};
+
+static void run_test(enum vm_guest_mode mode, void *arg)
{
+ struct test_params *p = arg;
pthread_t *vcpu_threads;
pthread_t *uffd_handler_threads = NULL;
struct uffd_handler_args *uffd_args = NULL;
@@ -261,7 +265,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
int vcpu_id;
int r;
- vm = create_vm(mode, nr_vcpus, guest_percpu_mem_size);
+ vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size);
perf_test_args.wr_fract = 1;
@@ -273,9 +277,9 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
- add_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
+ perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
- if (use_uffd) {
+ if (p->use_uffd) {
uffd_handler_threads =
malloc(nr_vcpus * sizeof(*uffd_handler_threads));
TEST_ASSERT(uffd_handler_threads, "Memory allocation failed");
@@ -308,7 +312,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
r = setup_demand_paging(vm,
&uffd_handler_threads[vcpu_id],
pipefds[vcpu_id * 2],
- uffd_delay, &uffd_args[vcpu_id],
+ p->uffd_delay, &uffd_args[vcpu_id],
vcpu_hva, guest_percpu_mem_size);
if (r < 0)
exit(-r);
@@ -339,7 +343,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
pr_info("All vCPU threads joined\n");
- if (use_uffd) {
+ if (p->use_uffd) {
char c;
/* Tell the user fault fd handler threads to quit */
@@ -357,43 +361,23 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
perf_test_args.vcpu_args[0].pages * nr_vcpus /
((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0));
- ucall_uninit(vm);
- kvm_vm_free(vm);
+ perf_test_destroy_vm(vm);
free(guest_data_prototype);
free(vcpu_threads);
- if (use_uffd) {
+ if (p->use_uffd) {
free(uffd_handler_threads);
free(uffd_args);
free(pipefds);
}
}
-struct guest_mode {
- bool supported;
- bool enabled;
-};
-static struct guest_mode guest_modes[NUM_VM_MODES];
-
-#define guest_mode_init(mode, supported, enabled) ({ \
- guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
-})
-
static void help(char *name)
{
- int i;
-
puts("");
printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n"
" [-b memory] [-v vcpus]\n", name);
- printf(" -m: specify the guest mode ID to test\n"
- " (default: test all supported modes)\n"
- " This option may be used multiple times.\n"
- " Guest mode IDs:\n");
- for (i = 0; i < NUM_VM_MODES; ++i) {
- printf(" %d: %s%s\n", i, vm_guest_mode_string(i),
- guest_modes[i].supported ? " (supported)" : "");
- }
+ guest_modes_help();
printf(" -u: use User Fault FD to handle vCPU page\n"
" faults.\n");
printf(" -d: add a delay in usec to the User Fault\n"
@@ -410,53 +394,22 @@ static void help(char *name)
int main(int argc, char *argv[])
{
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
- bool mode_selected = false;
- unsigned int mode;
- int opt, i;
- bool use_uffd = false;
- useconds_t uffd_delay = 0;
-
-#ifdef __x86_64__
- guest_mode_init(VM_MODE_PXXV48_4K, true, true);
-#endif
-#ifdef __aarch64__
- guest_mode_init(VM_MODE_P40V48_4K, true, true);
- guest_mode_init(VM_MODE_P40V48_64K, true, true);
- {
- unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
-
- if (limit >= 52)
- guest_mode_init(VM_MODE_P52V48_64K, true, true);
- if (limit >= 48) {
- guest_mode_init(VM_MODE_P48V48_4K, true, true);
- guest_mode_init(VM_MODE_P48V48_64K, true, true);
- }
- }
-#endif
-#ifdef __s390x__
- guest_mode_init(VM_MODE_P40V48_4K, true, true);
-#endif
+ struct test_params p = {};
+ int opt;
+
+ guest_modes_append_default();
while ((opt = getopt(argc, argv, "hm:ud:b:v:")) != -1) {
switch (opt) {
case 'm':
- if (!mode_selected) {
- for (i = 0; i < NUM_VM_MODES; ++i)
- guest_modes[i].enabled = false;
- mode_selected = true;
- }
- mode = strtoul(optarg, NULL, 10);
- TEST_ASSERT(mode < NUM_VM_MODES,
- "Guest mode ID %d too big", mode);
- guest_modes[mode].enabled = true;
+ guest_modes_cmdline(optarg);
break;
case 'u':
- use_uffd = true;
+ p.use_uffd = true;
break;
case 'd':
- uffd_delay = strtoul(optarg, NULL, 0);
- TEST_ASSERT(uffd_delay >= 0,
- "A negative UFFD delay is not supported.");
+ p.uffd_delay = strtoul(optarg, NULL, 0);
+ TEST_ASSERT(p.uffd_delay >= 0, "A negative UFFD delay is not supported.");
break;
case 'b':
guest_percpu_mem_size = parse_size(optarg);
@@ -473,14 +426,7 @@ int main(int argc, char *argv[])
}
}
- for (i = 0; i < NUM_VM_MODES; ++i) {
- if (!guest_modes[i].enabled)
- continue;
- TEST_ASSERT(guest_modes[i].supported,
- "Guest mode ID %d (%s) not supported.",
- i, vm_guest_mode_string(i));
- run_test(i, use_uffd, uffd_delay);
- }
+ for_each_guest_mode(run_test, &p);
return 0;
}
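
demand_paging_test (and dirty_log_perf_test below) stop open-coding the per-arch guest-mode tables and instead hand a struct test_params to a shared for_each_guest_mode() iterator from the new lib/guest_modes.c. The callback-plus-void-pointer shape of that refactor, reduced to a standalone sketch with made-up modes and a simplified iterator:

#include <stdio.h>

enum vm_guest_mode { MODE_4K, MODE_64K, NUM_MODES };	/* illustrative only */

struct test_params {
	int use_uffd;
	unsigned int uffd_delay;
};

/* Simplified stand-in for for_each_guest_mode(): run the test once per
 * supported mode, passing the parameter block through untouched. */
static void for_each_mode(void (*fn)(enum vm_guest_mode, void *), void *arg)
{
	for (int m = 0; m < NUM_MODES; m++)
		fn(m, arg);
}

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;

	printf("mode %d: use_uffd=%d delay=%u\n", mode, p->use_uffd, p->uffd_delay);
}

int main(void)
{
	struct test_params p = { .use_uffd = 1, .uffd_delay = 0 };

	for_each_mode(run_test, &p);
	return 0;
}
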
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 9c6a7be31e03..2283a0ec74a9 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -8,29 +8,28 @@
* Copyright (C) 2020, Google, Inc.
*/
-#define _GNU_SOURCE /* for program_invocation_name */
-
#include <stdio.h>
#include <stdlib.h>
-#include <unistd.h>
#include <time.h>
#include <pthread.h>
#include <linux/bitmap.h>
-#include <linux/bitops.h>
#include "kvm_util.h"
-#include "perf_test_util.h"
-#include "processor.h"
#include "test_util.h"
+#include "perf_test_util.h"
+#include "guest_modes.h"
/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
#define TEST_HOST_LOOP_N 2UL
+static int nr_vcpus = 1;
+static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+
/* Host variables */
static u64 dirty_log_manual_caps;
static bool host_quit;
static uint64_t iteration;
-static uint64_t vcpu_last_completed_iteration[MAX_VCPUS];
+static uint64_t vcpu_last_completed_iteration[KVM_MAX_VCPUS];
static void *vcpu_worker(void *data)
{
@@ -42,7 +41,7 @@ static void *vcpu_worker(void *data)
struct timespec ts_diff;
struct timespec total = (struct timespec){0};
struct timespec avg;
- struct vcpu_args *vcpu_args = (struct vcpu_args *)data;
+ struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data;
int vcpu_id = vcpu_args->vcpu_id;
vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
@@ -89,9 +88,15 @@ static void *vcpu_worker(void *data)
return NULL;
}
-static void run_test(enum vm_guest_mode mode, unsigned long iterations,
- uint64_t phys_offset, int wr_fract)
+struct test_params {
+ unsigned long iterations;
+ uint64_t phys_offset;
+ int wr_fract;
+};
+
+static void run_test(enum vm_guest_mode mode, void *arg)
{
+ struct test_params *p = arg;
pthread_t *vcpu_threads;
struct kvm_vm *vm;
unsigned long *bmap;
@@ -106,9 +111,9 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
struct kvm_enable_cap cap = {};
struct timespec clear_dirty_log_total = (struct timespec){0};
- vm = create_vm(mode, nr_vcpus, guest_percpu_mem_size);
+ vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size);
- perf_test_args.wr_fract = wr_fract;
+ perf_test_args.wr_fract = p->wr_fract;
guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
@@ -124,7 +129,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
- add_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
+ perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
sync_global_to_guest(vm, perf_test_args);
@@ -150,13 +155,13 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
/* Enable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
- vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX,
+ vm_mem_region_set_flags(vm, PERF_TEST_MEM_SLOT_INDEX,
KVM_MEM_LOG_DIRTY_PAGES);
ts_diff = timespec_diff_now(start);
pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
- while (iteration < iterations) {
+ while (iteration < p->iterations) {
/*
* Incrementing the iteration number will start the vCPUs
* dirtying memory again.
@@ -177,7 +182,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
clock_gettime(CLOCK_MONOTONIC, &start);
- kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
+ kvm_vm_get_dirty_log(vm, PERF_TEST_MEM_SLOT_INDEX, bmap);
ts_diff = timespec_diff_now(start);
get_dirty_log_total = timespec_add(get_dirty_log_total,
@@ -187,7 +192,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
if (dirty_log_manual_caps) {
clock_gettime(CLOCK_MONOTONIC, &start);
- kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
+ kvm_vm_clear_dirty_log(vm, PERF_TEST_MEM_SLOT_INDEX, bmap, 0,
host_num_pages);
ts_diff = timespec_diff_now(start);
@@ -205,43 +210,30 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
/* Disable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
- vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX, 0);
+ vm_mem_region_set_flags(vm, PERF_TEST_MEM_SLOT_INDEX, 0);
ts_diff = timespec_diff_now(start);
pr_info("Disabling dirty logging time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
- avg = timespec_div(get_dirty_log_total, iterations);
+ avg = timespec_div(get_dirty_log_total, p->iterations);
pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
- iterations, get_dirty_log_total.tv_sec,
+ p->iterations, get_dirty_log_total.tv_sec,
get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
if (dirty_log_manual_caps) {
- avg = timespec_div(clear_dirty_log_total, iterations);
+ avg = timespec_div(clear_dirty_log_total, p->iterations);
pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
- iterations, clear_dirty_log_total.tv_sec,
+ p->iterations, clear_dirty_log_total.tv_sec,
clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}
free(bmap);
free(vcpu_threads);
- ucall_uninit(vm);
- kvm_vm_free(vm);
+ perf_test_destroy_vm(vm);
}
-struct guest_mode {
- bool supported;
- bool enabled;
-};
-static struct guest_mode guest_modes[NUM_VM_MODES];
-
-#define guest_mode_init(mode, supported, enabled) ({ \
- guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
-})
-
static void help(char *name)
{
- int i;
-
puts("");
printf("usage: %s [-h] [-i iterations] [-p offset] "
"[-m mode] [-b vcpu bytes] [-v vcpus]\n", name);
@@ -250,14 +242,7 @@ static void help(char *name)
TEST_HOST_LOOP_N);
printf(" -p: specify guest physical test memory offset\n"
" Warning: a low offset can conflict with the loaded test code.\n");
- printf(" -m: specify the guest mode ID to test "
- "(default: test all supported modes)\n"
- " This option may be used multiple times.\n"
- " Guest mode IDs:\n");
- for (i = 0; i < NUM_VM_MODES; ++i) {
- printf(" %d: %s%s\n", i, vm_guest_mode_string(i),
- guest_modes[i].supported ? " (supported)" : "");
- }
+ guest_modes_help();
printf(" -b: specify the size of the memory region which should be\n"
" dirtied by each vCPU. e.g. 10M or 3G.\n"
" (default: 1G)\n");
@@ -272,74 +257,43 @@ static void help(char *name)
int main(int argc, char *argv[])
{
- unsigned long iterations = TEST_HOST_LOOP_N;
- bool mode_selected = false;
- uint64_t phys_offset = 0;
- unsigned int mode;
- int opt, i;
- int wr_fract = 1;
+ int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+ struct test_params p = {
+ .iterations = TEST_HOST_LOOP_N,
+ .wr_fract = 1,
+ };
+ int opt;
dirty_log_manual_caps =
kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
KVM_DIRTY_LOG_INITIALLY_SET);
-#ifdef __x86_64__
- guest_mode_init(VM_MODE_PXXV48_4K, true, true);
-#endif
-#ifdef __aarch64__
- guest_mode_init(VM_MODE_P40V48_4K, true, true);
- guest_mode_init(VM_MODE_P40V48_64K, true, true);
-
- {
- unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
-
- if (limit >= 52)
- guest_mode_init(VM_MODE_P52V48_64K, true, true);
- if (limit >= 48) {
- guest_mode_init(VM_MODE_P48V48_4K, true, true);
- guest_mode_init(VM_MODE_P48V48_64K, true, true);
- }
- }
-#endif
-#ifdef __s390x__
- guest_mode_init(VM_MODE_P40V48_4K, true, true);
-#endif
+ guest_modes_append_default();
while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:")) != -1) {
switch (opt) {
case 'i':
- iterations = strtol(optarg, NULL, 10);
+ p.iterations = strtol(optarg, NULL, 10);
break;
case 'p':
- phys_offset = strtoull(optarg, NULL, 0);
+ p.phys_offset = strtoull(optarg, NULL, 0);
break;
case 'm':
- if (!mode_selected) {
- for (i = 0; i < NUM_VM_MODES; ++i)
- guest_modes[i].enabled = false;
- mode_selected = true;
- }
- mode = strtoul(optarg, NULL, 10);
- TEST_ASSERT(mode < NUM_VM_MODES,
- "Guest mode ID %d too big", mode);
- guest_modes[mode].enabled = true;
+ guest_modes_cmdline(optarg);
break;
case 'b':
guest_percpu_mem_size = parse_size(optarg);
break;
case 'f':
- wr_fract = atoi(optarg);
- TEST_ASSERT(wr_fract >= 1,
+ p.wr_fract = atoi(optarg);
+ TEST_ASSERT(p.wr_fract >= 1,
"Write fraction cannot be less than one");
break;
case 'v':
nr_vcpus = atoi(optarg);
- TEST_ASSERT(nr_vcpus > 0,
- "Must have a positive number of vCPUs");
- TEST_ASSERT(nr_vcpus <= MAX_VCPUS,
- "This test does not currently support\n"
- "more than %d vCPUs.", MAX_VCPUS);
+ TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
+ "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
break;
case 'h':
default:
@@ -348,18 +302,11 @@ int main(int argc, char *argv[])
}
}
- TEST_ASSERT(iterations >= 2, "The test should have at least two iterations");
+ TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");
- pr_info("Test iterations: %"PRIu64"\n", iterations);
+ pr_info("Test iterations: %"PRIu64"\n", p.iterations);
- for (i = 0; i < NUM_VM_MODES; ++i) {
- if (!guest_modes[i].enabled)
- continue;
- TEST_ASSERT(guest_modes[i].supported,
- "Guest mode ID %d (%s) not supported.",
- i, vm_guest_mode_string(i));
- run_test(i, iterations, phys_offset, wr_fract);
- }
+ for_each_guest_mode(run_test, &p);
return 0;
}
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 471baecb7772..bb2752d78fe3 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -9,8 +9,6 @@
#include <stdio.h>
#include <stdlib.h>
-#include <unistd.h>
-#include <time.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
@@ -20,8 +18,9 @@
#include <linux/bitops.h>
#include <asm/barrier.h>
-#include "test_util.h"
#include "kvm_util.h"
+#include "test_util.h"
+#include "guest_modes.h"
#include "processor.h"
#define VCPU_ID 1
@@ -673,9 +672,15 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
#define DIRTY_MEM_BITS 30 /* 1G */
#define PAGE_SHIFT_4K 12
-static void run_test(enum vm_guest_mode mode, unsigned long iterations,
- unsigned long interval, uint64_t phys_offset)
+struct test_params {
+ unsigned long iterations;
+ unsigned long interval;
+ uint64_t phys_offset;
+};
+
+static void run_test(enum vm_guest_mode mode, void *arg)
{
+ struct test_params *p = arg;
struct kvm_vm *vm;
unsigned long *bmap;
@@ -709,12 +714,12 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
host_page_size = getpagesize();
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
- if (!phys_offset) {
+ if (!p->phys_offset) {
guest_test_phys_mem = (vm_get_max_gfn(vm) -
guest_num_pages) * guest_page_size;
guest_test_phys_mem &= ~(host_page_size - 1);
} else {
- guest_test_phys_mem = phys_offset;
+ guest_test_phys_mem = p->phys_offset;
}
#ifdef __s390x__
@@ -758,9 +763,9 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
- while (iteration < iterations) {
+ while (iteration < p->iterations) {
/* Give the vcpu thread some time to dirty some pages */
- usleep(interval * 1000);
+ usleep(p->interval * 1000);
log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
bmap, host_num_pages);
vm_dirty_log_verify(mode, bmap);
@@ -783,20 +788,8 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
kvm_vm_free(vm);
}
-struct guest_mode {
- bool supported;
- bool enabled;
-};
-static struct guest_mode guest_modes[NUM_VM_MODES];
-
-#define guest_mode_init(mode, supported, enabled) ({ \
- guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
-})
-
static void help(char *name)
{
- int i;
-
puts("");
printf("usage: %s [-h] [-i iterations] [-I interval] "
"[-p offset] [-m mode]\n", name);
@@ -813,51 +806,23 @@ static void help(char *name)
printf(" -M: specify the host logging mode "
"(default: run all log modes). Supported modes: \n\t");
log_modes_dump();
- printf(" -m: specify the guest mode ID to test "
- "(default: test all supported modes)\n"
- " This option may be used multiple times.\n"
- " Guest mode IDs:\n");
- for (i = 0; i < NUM_VM_MODES; ++i) {
- printf(" %d: %s%s\n", i, vm_guest_mode_string(i),
- guest_modes[i].supported ? " (supported)" : "");
- }
+ guest_modes_help();
puts("");
exit(0);
}
int main(int argc, char *argv[])
{
- unsigned long iterations = TEST_HOST_LOOP_N;
- unsigned long interval = TEST_HOST_LOOP_INTERVAL;
- bool mode_selected = false;
- uint64_t phys_offset = 0;
- unsigned int mode;
- int opt, i, j;
+ struct test_params p = {
+ .iterations = TEST_HOST_LOOP_N,
+ .interval = TEST_HOST_LOOP_INTERVAL,
+ };
+ int opt, i;
sem_init(&dirty_ring_vcpu_stop, 0, 0);
sem_init(&dirty_ring_vcpu_cont, 0, 0);
-#ifdef __x86_64__
- guest_mode_init(VM_MODE_PXXV48_4K, true, true);
-#endif
-#ifdef __aarch64__
- guest_mode_init(VM_MODE_P40V48_4K, true, true);
- guest_mode_init(VM_MODE_P40V48_64K, true, true);
-
- {
- unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
-
- if (limit >= 52)
- guest_mode_init(VM_MODE_P52V48_64K, true, true);
- if (limit >= 48) {
- guest_mode_init(VM_MODE_P48V48_4K, true, true);
- guest_mode_init(VM_MODE_P48V48_64K, true, true);
- }
- }
-#endif
-#ifdef __s390x__
- guest_mode_init(VM_MODE_P40V48_4K, true, true);
-#endif
+ guest_modes_append_default();
while ((opt = getopt(argc, argv, "c:hi:I:p:m:M:")) != -1) {
switch (opt) {
@@ -865,24 +830,16 @@ int main(int argc, char *argv[])
test_dirty_ring_count = strtol(optarg, NULL, 10);
break;
case 'i':
- iterations = strtol(optarg, NULL, 10);
+ p.iterations = strtol(optarg, NULL, 10);
break;
case 'I':
- interval = strtol(optarg, NULL, 10);
+ p.interval = strtol(optarg, NULL, 10);
break;
case 'p':
- phys_offset = strtoull(optarg, NULL, 0);
+ p.phys_offset = strtoull(optarg, NULL, 0);
break;
case 'm':
- if (!mode_selected) {
- for (i = 0; i < NUM_VM_MODES; ++i)
- guest_modes[i].enabled = false;
- mode_selected = true;
- }
- mode = strtoul(optarg, NULL, 10);
- TEST_ASSERT(mode < NUM_VM_MODES,
- "Guest mode ID %d too big", mode);
- guest_modes[mode].enabled = true;
+ guest_modes_cmdline(optarg);
break;
case 'M':
if (!strcmp(optarg, "all")) {
@@ -911,32 +868,24 @@ int main(int argc, char *argv[])
}
}
- TEST_ASSERT(iterations > 2, "Iterations must be greater than two");
- TEST_ASSERT(interval > 0, "Interval must be greater than zero");
+ TEST_ASSERT(p.iterations > 2, "Iterations must be greater than two");
+ TEST_ASSERT(p.interval > 0, "Interval must be greater than zero");
pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
- iterations, interval);
+ p.iterations, p.interval);
srandom(time(0));
- for (i = 0; i < NUM_VM_MODES; ++i) {
- if (!guest_modes[i].enabled)
- continue;
- TEST_ASSERT(guest_modes[i].supported,
- "Guest mode ID %d (%s) not supported.",
- i, vm_guest_mode_string(i));
- if (host_log_mode_option == LOG_MODE_ALL) {
- /* Run each log mode */
- for (j = 0; j < LOG_MODE_NUM; j++) {
- pr_info("Testing Log Mode '%s'\n",
- log_modes[j].name);
- host_log_mode = j;
- run_test(i, iterations, interval, phys_offset);
- }
- } else {
- host_log_mode = host_log_mode_option;
- run_test(i, iterations, interval, phys_offset);
+ if (host_log_mode_option == LOG_MODE_ALL) {
+ /* Run each log mode */
+ for (i = 0; i < LOG_MODE_NUM; i++) {
+ pr_info("Testing Log Mode '%s'\n", log_modes[i].name);
+ host_log_mode = i;
+ for_each_guest_mode(run_test, &p);
}
+ } else {
+ host_log_mode = host_log_mode_option;
+ for_each_guest_mode(run_test, &p);
}
return 0;
diff --git a/tools/testing/selftests/kvm/include/guest_modes.h b/tools/testing/selftests/kvm/include/guest_modes.h
new file mode 100644
index 000000000000..b691df33e64e
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/guest_modes.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020, Red Hat, Inc.
+ */
+#include "kvm_util.h"
+
+struct guest_mode {
+ bool supported;
+ bool enabled;
+};
+
+extern struct guest_mode guest_modes[NUM_VM_MODES];
+
+#define guest_mode_append(mode, supported, enabled) ({ \
+ guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
+})
+
+void guest_modes_append_default(void);
+void for_each_guest_mode(void (*func)(enum vm_guest_mode, void *), void *arg);
+void guest_modes_help(void);
+void guest_modes_cmdline(const char *arg);
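As a quick orientation for the helpers declared here, the following is a minimal sketch (not part of the patch) of how a converted selftest drives them from its option parsing. struct test_params, run_test() and the "hm:" option string are illustrative stand-ins modeled on the converted tests above; only the guest_modes_*() and for_each_guest_mode() calls come from this header.

/* Illustrative only: test_params and run_test mimic the converted tests. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include "guest_modes.h"

struct test_params {
	unsigned long iterations;
};

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;

	printf("mode %s: %lu iterations\n", vm_guest_mode_string(mode), p->iterations);
	/* per-mode test body goes here */
}

int main(int argc, char *argv[])
{
	struct test_params p = { .iterations = 2 };
	int opt;

	guest_modes_append_default();	/* enable every mode the host supports */

	while ((opt = getopt(argc, argv, "hm:")) != -1) {
		switch (opt) {
		case 'm':
			guest_modes_cmdline(optarg);	/* restrict to user-selected mode IDs */
			break;
		case 'h':
		default:
			guest_modes_help();
			exit(0);
		}
	}

	for_each_guest_mode(run_test, &p);	/* calls run_test() once per enabled mode */
	return 0;
}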
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index dfa9d369e8fc..5cbb861525ed 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -16,6 +16,7 @@
#include "sparsebit.h"
+#define KVM_MAX_VCPUS 512
/*
* Callers of kvm_util only have an incomplete/opaque description of the
@@ -70,6 +71,14 @@ enum vm_guest_mode {
#define vm_guest_mode_string(m) vm_guest_mode_string[m]
extern const char * const vm_guest_mode_string[];
+struct vm_guest_mode_params {
+ unsigned int pa_bits;
+ unsigned int va_bits;
+ unsigned int page_size;
+ unsigned int page_shift;
+};
+extern const struct vm_guest_mode_params vm_guest_mode_params[];
+
enum vm_mem_backing_src_type {
VM_MEM_SRC_ANONYMOUS,
VM_MEM_SRC_ANONYMOUS_THP,
diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h
index 239421e4f6b8..b1188823c31b 100644
--- a/tools/testing/selftests/kvm/include/perf_test_util.h
+++ b/tools/testing/selftests/kvm/include/perf_test_util.h
@@ -9,38 +9,15 @@
#define SELFTEST_KVM_PERF_TEST_UTIL_H
#include "kvm_util.h"
-#include "processor.h"
-
-#define MAX_VCPUS 512
-
-#define PAGE_SHIFT_4K 12
-#define PTES_PER_4K_PT 512
-
-#define TEST_MEM_SLOT_INDEX 1
/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM 0xc0000000
#define DEFAULT_PER_VCPU_MEM_SIZE (1 << 30) /* 1G */
-/*
- * Guest physical memory offset of the testing memory slot.
- * This will be set to the topmost valid physical address minus
- * the test memory size.
- */
-static uint64_t guest_test_phys_mem;
-
-/*
- * Guest virtual memory offset of the testing memory slot.
- * Must not conflict with identity mapped test code.
- */
-static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
-static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
-
-/* Number of VCPUs for the test */
-static int nr_vcpus = 1;
+#define PERF_TEST_MEM_SLOT_INDEX 1
-struct vcpu_args {
+struct perf_test_vcpu_args {
uint64_t gva;
uint64_t pages;
@@ -54,141 +31,21 @@ struct perf_test_args {
uint64_t guest_page_size;
int wr_fract;
- struct vcpu_args vcpu_args[MAX_VCPUS];
+ struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS];
};
-static struct perf_test_args perf_test_args;
+extern struct perf_test_args perf_test_args;
/*
- * Continuously write to the first 8 bytes of each page in the
- * specified region.
+ * Guest physical memory offset of the testing memory slot.
+ * This will be set to the topmost valid physical address minus
+ * the test memory size.
*/
-static void guest_code(uint32_t vcpu_id)
-{
- struct vcpu_args *vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
- uint64_t gva;
- uint64_t pages;
- int i;
-
- /* Make sure vCPU args data structure is not corrupt. */
- GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id);
-
- gva = vcpu_args->gva;
- pages = vcpu_args->pages;
-
- while (true) {
- for (i = 0; i < pages; i++) {
- uint64_t addr = gva + (i * perf_test_args.guest_page_size);
-
- if (i % perf_test_args.wr_fract == 0)
- *(uint64_t *)addr = 0x0123456789ABCDEF;
- else
- READ_ONCE(*(uint64_t *)addr);
- }
-
- GUEST_SYNC(1);
- }
-}
-
-static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus,
- uint64_t vcpu_memory_bytes)
-{
- struct kvm_vm *vm;
- uint64_t pages = DEFAULT_GUEST_PHY_PAGES;
- uint64_t guest_num_pages;
-
- /* Account for a few pages per-vCPU for stacks */
- pages += DEFAULT_STACK_PGS * vcpus;
-
- /*
- * Reserve twice the ammount of memory needed to map the test region and
- * the page table / stacks region, at 4k, for page tables. Do the
- * calculation with 4K page size: the smallest of all archs. (e.g., 64K
- * page size guest will need even less memory for page tables).
- */
- pages += (2 * pages) / PTES_PER_4K_PT;
- pages += ((2 * vcpus * vcpu_memory_bytes) >> PAGE_SHIFT_4K) /
- PTES_PER_4K_PT;
- pages = vm_adjust_num_guest_pages(mode, pages);
-
- pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
-
- vm = vm_create(mode, pages, O_RDWR);
- kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
-#ifdef __x86_64__
- vm_create_irqchip(vm);
-#endif
-
- perf_test_args.vm = vm;
- perf_test_args.guest_page_size = vm_get_page_size(vm);
- perf_test_args.host_page_size = getpagesize();
-
- TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
- "Guest memory size is not guest page size aligned.");
-
- guest_num_pages = (vcpus * vcpu_memory_bytes) /
- perf_test_args.guest_page_size;
- guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
-
- /*
- * If there should be more memory in the guest test region than there
- * can be pages in the guest, it will definitely cause problems.
- */
- TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
- "Requested more guest memory than address space allows.\n"
- " guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
- guest_num_pages, vm_get_max_gfn(vm), vcpus,
- vcpu_memory_bytes);
-
- TEST_ASSERT(vcpu_memory_bytes % perf_test_args.host_page_size == 0,
- "Guest memory size is not host page size aligned.");
-
- guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
- perf_test_args.guest_page_size;
- guest_test_phys_mem &= ~(perf_test_args.host_page_size - 1);
-
-#ifdef __s390x__
- /* Align to 1M (segment size) */
- guest_test_phys_mem &= ~((1 << 20) - 1);
-#endif
-
- pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
-
- /* Add an extra memory slot for testing */
- vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
- guest_test_phys_mem,
- TEST_MEM_SLOT_INDEX,
- guest_num_pages, 0);
-
- /* Do mapping for the demand paging memory slot */
- virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
-
- ucall_init(vm, NULL);
-
- return vm;
-}
-
-static void add_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes)
-{
- vm_paddr_t vcpu_gpa;
- struct vcpu_args *vcpu_args;
- int vcpu_id;
-
- for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
- vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
-
- vm_vcpu_add_default(vm, vcpu_id, guest_code);
-
- vcpu_args->vcpu_id = vcpu_id;
- vcpu_args->gva = guest_test_virt_mem +
- (vcpu_id * vcpu_memory_bytes);
- vcpu_args->pages = vcpu_memory_bytes /
- perf_test_args.guest_page_size;
+extern uint64_t guest_test_phys_mem;
- vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
- pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
- vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);
- }
-}
+struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
+ uint64_t vcpu_memory_bytes);
+void perf_test_destroy_vm(struct kvm_vm *vm);
+void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes);
#endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
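For context, a rough sketch (not part of the patch) of the call sequence the converted tests use with this slimmed-down interface. nr_vcpus and guest_percpu_mem_size stand in for the callers' globals and run_test() is illustrative; the perf_test_*() functions, perf_test_args and PERF_TEST_MEM_SLOT_INDEX are the ones declared above.

/* Sketch only: nr_vcpus/guest_percpu_mem_size mirror the callers' globals. */
#include "kvm_util.h"
#include "perf_test_util.h"

static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct kvm_vm *vm;

	/* Create the VM, its vCPUs, and the extra test memslot for this mode. */
	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size);

	/* Carve the test region into per-vCPU GVA ranges and page counts. */
	perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size);

	/* Publish perf_test_args (wr_fract, page sizes, vcpu_args[]) to guest_code(). */
	sync_global_to_guest(vm, perf_test_args);

	/*
	 * ... spawn vCPU worker threads, toggle KVM_MEM_LOG_DIRTY_PAGES on
	 * PERF_TEST_MEM_SLOT_INDEX, collect timings ...
	 */

	perf_test_destroy_vm(vm);	/* ucall_uninit() + kvm_vm_free() */
}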
diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c
new file mode 100644
index 000000000000..25bff307c71f
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/guest_modes.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Red Hat, Inc.
+ */
+#include "guest_modes.h"
+
+struct guest_mode guest_modes[NUM_VM_MODES];
+
+void guest_modes_append_default(void)
+{
+ guest_mode_append(VM_MODE_DEFAULT, true, true);
+
+#ifdef __aarch64__
+ guest_mode_append(VM_MODE_P40V48_64K, true, true);
+ {
+ unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
+ if (limit >= 52)
+ guest_mode_append(VM_MODE_P52V48_64K, true, true);
+ if (limit >= 48) {
+ guest_mode_append(VM_MODE_P48V48_4K, true, true);
+ guest_mode_append(VM_MODE_P48V48_64K, true, true);
+ }
+ }
+#endif
+}
+
+void for_each_guest_mode(void (*func)(enum vm_guest_mode, void *), void *arg)
+{
+ int i;
+
+ for (i = 0; i < NUM_VM_MODES; ++i) {
+ if (!guest_modes[i].enabled)
+ continue;
+ TEST_ASSERT(guest_modes[i].supported,
+ "Guest mode ID %d (%s) not supported.",
+ i, vm_guest_mode_string(i));
+ func(i, arg);
+ }
+}
+
+void guest_modes_help(void)
+{
+ int i;
+
+ printf(" -m: specify the guest mode ID to test\n"
+ " (default: test all supported modes)\n"
+ " This option may be used multiple times.\n"
+ " Guest mode IDs:\n");
+ for (i = 0; i < NUM_VM_MODES; ++i) {
+ printf(" %d: %s%s\n", i, vm_guest_mode_string(i),
+ guest_modes[i].supported ? " (supported)" : "");
+ }
+}
+
+void guest_modes_cmdline(const char *arg)
+{
+ static bool mode_selected;
+ unsigned int mode;
+ int i;
+
+ if (!mode_selected) {
+ for (i = 0; i < NUM_VM_MODES; ++i)
+ guest_modes[i].enabled = false;
+ mode_selected = true;
+ }
+
+ mode = strtoul(arg, NULL, 10);
+ TEST_ASSERT(mode < NUM_VM_MODES, "Guest mode ID %d too big", mode);
+ guest_modes[mode].enabled = true;
+}
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 88ef7067f1e6..fa5a90e6c6f0 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -153,14 +153,7 @@ const char * const vm_guest_mode_string[] = {
_Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
"Missing new mode strings?");
-struct vm_guest_mode_params {
- unsigned int pa_bits;
- unsigned int va_bits;
- unsigned int page_size;
- unsigned int page_shift;
-};
-
-static const struct vm_guest_mode_params vm_guest_mode_params[] = {
+const struct vm_guest_mode_params vm_guest_mode_params[] = {
{ 52, 48, 0x1000, 12 },
{ 52, 48, 0x10000, 16 },
{ 48, 48, 0x1000, 12 },
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
new file mode 100644
index 000000000000..9be1944c2d1c
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#include "kvm_util.h"
+#include "perf_test_util.h"
+#include "processor.h"
+
+struct perf_test_args perf_test_args;
+
+uint64_t guest_test_phys_mem;
+
+/*
+ * Guest virtual memory offset of the testing memory slot.
+ * Must not conflict with identity mapped test code.
+ */
+static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+
+/*
+ * Continuously write to the first 8 bytes of each page in the
+ * specified region.
+ */
+static void guest_code(uint32_t vcpu_id)
+{
+ struct perf_test_vcpu_args *vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
+ uint64_t gva;
+ uint64_t pages;
+ int i;
+
+ /* Make sure vCPU args data structure is not corrupt. */
+ GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id);
+
+ gva = vcpu_args->gva;
+ pages = vcpu_args->pages;
+
+ while (true) {
+ for (i = 0; i < pages; i++) {
+ uint64_t addr = gva + (i * perf_test_args.guest_page_size);
+
+ if (i % perf_test_args.wr_fract == 0)
+ *(uint64_t *)addr = 0x0123456789ABCDEF;
+ else
+ READ_ONCE(*(uint64_t *)addr);
+ }
+
+ GUEST_SYNC(1);
+ }
+}
+
+struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
+ uint64_t vcpu_memory_bytes)
+{
+ struct kvm_vm *vm;
+ uint64_t guest_num_pages;
+
+ pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+
+ perf_test_args.host_page_size = getpagesize();
+ perf_test_args.guest_page_size = vm_guest_mode_params[mode].page_size;
+
+ guest_num_pages = vm_adjust_num_guest_pages(mode,
+ (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size);
+
+ TEST_ASSERT(vcpu_memory_bytes % perf_test_args.host_page_size == 0,
+ "Guest memory size is not host page size aligned.");
+ TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
+ "Guest memory size is not guest page size aligned.");
+
+ vm = vm_create_with_vcpus(mode, vcpus,
+ (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size,
+ 0, guest_code, NULL);
+
+ perf_test_args.vm = vm;
+
+ /*
+ * If there should be more memory in the guest test region than there
+ * can be pages in the guest, it will definitely cause problems.
+ */
+ TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
+ "Requested more guest memory than address space allows.\n"
+ " guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
+ guest_num_pages, vm_get_max_gfn(vm), vcpus,
+ vcpu_memory_bytes);
+
+ guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
+ perf_test_args.guest_page_size;
+ guest_test_phys_mem &= ~(perf_test_args.host_page_size - 1);
+#ifdef __s390x__
+ /* Align to 1M (segment size) */
+ guest_test_phys_mem &= ~((1 << 20) - 1);
+#endif
+ pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
+
+ /* Add an extra memory slot for testing */
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+ guest_test_phys_mem,
+ PERF_TEST_MEM_SLOT_INDEX,
+ guest_num_pages, 0);
+
+ /* Map the test memory slot into the guest's address space */
+ virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+
+ ucall_init(vm, NULL);
+
+ return vm;
+}
+
+void perf_test_destroy_vm(struct kvm_vm *vm)
+{
+ ucall_uninit(vm);
+ kvm_vm_free(vm);
+}
+
+void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes)
+{
+ vm_paddr_t vcpu_gpa;
+ struct perf_test_vcpu_args *vcpu_args;
+ int vcpu_id;
+
+ for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
+ vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
+
+ vcpu_args->vcpu_id = vcpu_id;
+ vcpu_args->gva = guest_test_virt_mem +
+ (vcpu_id * vcpu_memory_bytes);
+ vcpu_args->pages = vcpu_memory_bytes /
+ perf_test_args.guest_page_size;
+
+ vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
+ pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
+ vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);
+ }
+}
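To show how the per-vCPU state initialized above is consumed, here is a hedged sketch mirroring vcpu_worker() in dirty_log_perf_test.c; spawn_vcpu_workers() is a hypothetical helper, and the vm and nr_vcpus globals belong to the calling test.

/* Sketch only: spawn_vcpu_workers() is a hypothetical helper; vm and
 * nr_vcpus are the calling test's globals, as in dirty_log_perf_test.c. */
#include <pthread.h>

#include "kvm_util.h"
#include "perf_test_util.h"

static struct kvm_vm *vm;
static int nr_vcpus;

static void *vcpu_worker(void *data)
{
	struct perf_test_vcpu_args *vcpu_args = data;
	int vcpu_id = vcpu_args->vcpu_id;

	/* Hand the vCPU its ID so guest_code() indexes the right vcpu_args slot. */
	vcpu_args_set(vm, vcpu_id, 1, vcpu_id);

	/* ... vcpu_run() loop, synchronizing with the guest via GUEST_SYNC() ... */
	return NULL;
}

static void spawn_vcpu_workers(pthread_t *vcpu_threads)
{
	int vcpu_id;

	/* One thread per vCPU, each handed its slot in perf_test_args. */
	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
		pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
			       &perf_test_args.vcpu_args[vcpu_id]);
}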
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index eb693a3b7b4a..4c7d33618437 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -869,7 +869,7 @@ ipv6_torture()
pid3=$!
ip netns exec me ping -f 2001:db8:101::2 >/dev/null 2>&1 &
pid4=$!
- ip netns exec me mausezahn veth1 -B 2001:db8:101::2 -A 2001:db8:91::1 -c 0 -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1 &
+ ip netns exec me mausezahn -6 veth1 -B 2001:db8:101::2 -A 2001:db8:91::1 -c 0 -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1 &
pid5=$!
sleep 300
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 84205c3a55eb..2b5707738609 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -1055,7 +1055,6 @@ ipv6_addr_metric_test()
check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 260"
log_test $? 0 "Set metric with peer route on local side"
- log_test $? 0 "User specified metric on local address"
check_route6 "2001:db8:104::2 dev dummy2 proto kernel metric 260"
log_test $? 0 "Set metric with peer route on peer side"
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index 464e31eabc73..64cd2e23c568 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -162,7 +162,15 @@
# - list_flush_ipv6_exception
# Using the same topology as in pmtu_ipv6, create exceptions, and check
# they are shown when listing exception caches, gone after flushing them
-
+#
+# - pmtu_ipv4_route_change
+# Use the same topology as in pmtu_ipv4, but issue a route replacement
+# command and delete the corresponding device afterward. This tests for
+# proper cleanup of the PMTU exceptions by the route replacement path.
+# Device unregistration should complete successfully
+#
+# - pmtu_ipv6_route_change
+# Same as above but with IPv6
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
@@ -224,7 +232,9 @@ tests="
cleanup_ipv4_exception ipv4: cleanup of cached exceptions 1
cleanup_ipv6_exception ipv6: cleanup of cached exceptions 1
list_flush_ipv4_exception ipv4: list and flush cached exceptions 1
- list_flush_ipv6_exception ipv6: list and flush cached exceptions 1"
+ list_flush_ipv6_exception ipv6: list and flush cached exceptions 1
+ pmtu_ipv4_route_change ipv4: PMTU exception w/route replace 1
+ pmtu_ipv6_route_change ipv6: PMTU exception w/route replace 1"
NS_A="ns-A"
NS_B="ns-B"
@@ -1782,6 +1792,63 @@ test_list_flush_ipv6_exception() {
return ${fail}
}
+test_pmtu_ipvX_route_change() {
+ family=${1}
+
+ setup namespaces routing || return 2
+ trace "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \
+ "${ns_r1}" veth_R1-B "${ns_b}" veth_B-R1 \
+ "${ns_a}" veth_A-R2 "${ns_r2}" veth_R2-A \
+ "${ns_r2}" veth_R2-B "${ns_b}" veth_B-R2
+
+ if [ ${family} -eq 4 ]; then
+ ping=ping
+ dst1="${prefix4}.${b_r1}.1"
+ dst2="${prefix4}.${b_r2}.1"
+ gw="${prefix4}.${a_r1}.2"
+ else
+ ping=${ping6}
+ dst1="${prefix6}:${b_r1}::1"
+ dst2="${prefix6}:${b_r2}::1"
+ gw="${prefix6}:${a_r1}::2"
+ fi
+
+ # Set up initial MTU values
+ mtu "${ns_a}" veth_A-R1 2000
+ mtu "${ns_r1}" veth_R1-A 2000
+ mtu "${ns_r1}" veth_R1-B 1400
+ mtu "${ns_b}" veth_B-R1 1400
+
+ mtu "${ns_a}" veth_A-R2 2000
+ mtu "${ns_r2}" veth_R2-A 2000
+ mtu "${ns_r2}" veth_R2-B 1500
+ mtu "${ns_b}" veth_B-R2 1500
+
+ # Create route exceptions
+ run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1}
+ run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2}
+
+ # Check that exceptions have been created with the correct PMTU
+ pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
+ check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1
+ pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+ check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1
+
+ # Replace the route from A to R1
+ run_cmd ${ns_a} ip route change default via ${gw}
+
+ # Delete the device in A
+ run_cmd ${ns_a} ip link del "veth_A-R1"
+}
+
+test_pmtu_ipv4_route_change() {
+ test_pmtu_ipvX_route_change 4
+}
+
+test_pmtu_ipv6_route_change() {
+ test_pmtu_ipvX_route_change 6
+}
+
usage() {
echo
echo "$0 [OPTIONS] [TEST]..."
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index cb0d1890a860..e0088c2d38a5 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -103,8 +103,8 @@ FIXTURE(tls)
FIXTURE_VARIANT(tls)
{
- u16 tls_version;
- u16 cipher_type;
+ uint16_t tls_version;
+ uint16_t cipher_type;
};
FIXTURE_VARIANT_ADD(tls, 12_gcm)
diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
index ac2a30be9b32..f8a19f548ae9 100755
--- a/tools/testing/selftests/net/udpgro.sh
+++ b/tools/testing/selftests/net/udpgro.sh
@@ -5,6 +5,14 @@
readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+# Set the global exit status, but never reset a nonzero one.
+check_err()
+{
+ if [ $ret -eq 0 ]; then
+ ret=$1
+ fi
+}
+
cleanup() {
local -r jobs="$(jobs -p)"
local -r ns="$(ip netns list|grep $PEER_NS)"
@@ -44,7 +52,9 @@ run_one() {
# Hack: let bg programs complete the startup
sleep 0.1
./udpgso_bench_tx ${tx_args}
+ ret=$?
wait $(jobs -p)
+ return $ret
}
run_test() {
@@ -87,8 +97,10 @@ run_one_nat() {
sleep 0.1
./udpgso_bench_tx ${tx_args}
+ ret=$?
kill -INT $pid
wait $(jobs -p)
+ return $ret
}
run_one_2sock() {
@@ -110,7 +122,9 @@ run_one_2sock() {
sleep 0.1
# first UDP GSO socket should be closed at this point
./udpgso_bench_tx ${tx_args}
+ ret=$?
wait $(jobs -p)
+ return $ret
}
run_nat_test() {
@@ -131,36 +145,54 @@ run_all() {
local -r core_args="-l 4"
local -r ipv4_args="${core_args} -4 -D 192.168.1.1"
local -r ipv6_args="${core_args} -6 -D 2001:db8::1"
+ ret=0
echo "ipv4"
run_test "no GRO" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400"
+ check_err $?
# explicitly check we are not receiving UDP_SEGMENT cmsg (-S -1)
# when GRO does not take place
run_test "no GRO chk cmsg" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400 -S -1"
+ check_err $?
# the GSO packets are aggregated because:
# * veth schedule napi after each xmit
# * segmentation happens in BH context, veth napi poll is delayed after
# the transmission of the last segment
run_test "GRO" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720"
+ check_err $?
run_test "GRO chk cmsg" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472"
+ check_err $?
run_test "GRO with custom segment size" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720"
+ check_err $?
run_test "GRO with custom segment size cmsg" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720 -S 500"
+ check_err $?
run_nat_test "bad GRO lookup" "${ipv4_args} -M 1 -s 14720 -S 0" "-n 10 -l 1472"
+ check_err $?
run_2sock_test "multiple GRO socks" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472"
+ check_err $?
echo "ipv6"
run_test "no GRO" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400"
+ check_err $?
run_test "no GRO chk cmsg" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400 -S -1"
+ check_err $?
run_test "GRO" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520"
+ check_err $?
run_test "GRO chk cmsg" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520 -S 1452"
+ check_err $?
run_test "GRO with custom segment size" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520"
+ check_err $?
run_test "GRO with custom segment size cmsg" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520 -S 500"
+ check_err $?
run_nat_test "bad GRO lookup" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 10 -l 1452"
+ check_err $?
run_2sock_test "multiple GRO socks" "${ipv6_args} -M 1 -s 14520 -S 0 " "-n 1 -l 14520 -S 1452"
+ check_err $?
+ return $ret
}
if [ ! -f ../bpf/xdp_dummy.o ]; then
@@ -180,3 +212,5 @@ elif [[ $1 == "__subprocess_2sock" ]]; then
shift
run_one_2sock $@
fi
+
+exit $?
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index a374e10ef506..3006a8e5b41a 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -4,7 +4,8 @@
TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
nft_concat_range.sh nft_conntrack_helper.sh \
- nft_queue.sh nft_meta.sh
+ nft_queue.sh nft_meta.sh \
+ ipip-conntrack-mtu.sh
LDLIBS = -lmnl
TEST_GEN_FILES = nf-queue
diff --git a/tools/testing/selftests/netfilter/ipip-conntrack-mtu.sh b/tools/testing/selftests/netfilter/ipip-conntrack-mtu.sh
new file mode 100755
index 000000000000..4a6f5c3b3215
--- /dev/null
+++ b/tools/testing/selftests/netfilter/ipip-conntrack-mtu.sh
@@ -0,0 +1,206 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+# Conntrack needs to reassemble fragments in order to have complete
+# packets for rule matching. Reassembly can lead to packet loss.
+
+# Consider the following setup:
+# +--------+ +---------+ +--------+
+# |Router A|-------|Wanrouter|-------|Router B|
+# | |.IPIP..| |..IPIP.| |
+# +--------+ +---------+ +--------+
+# / mtu 1400 \
+# / \
+#+--------+ +--------+
+#|Client A| |Client B|
+#| | | |
+#+--------+ +--------+
+
+# Router A and Router B use IPIP tunnel interfaces to tunnel traffic
+# between Client A and Client B over WAN. Wanrouter has MTU 1400 set
+# on its interfaces.
+
+rnd=$(mktemp -u XXXXXXXX)
+rx=$(mktemp)
+
+r_a="ns-ra-$rnd"
+r_b="ns-rb-$rnd"
+r_w="ns-rw-$rnd"
+c_a="ns-ca-$rnd"
+c_b="ns-cb-$rnd"
+
+checktool (){
+ if ! $1 > /dev/null 2>&1; then
+ echo "SKIP: Could not $2"
+ exit $ksft_skip
+ fi
+}
+
+checktool "iptables --version" "run test without iptables"
+checktool "ip -Version" "run test without ip tool"
+checktool "which nc" "run test without nc (netcat)"
+checktool "ip netns add ${r_a}" "create net namespace"
+
+for n in ${r_b} ${r_w} ${c_a} ${c_b};do
+ ip netns add ${n}
+done
+
+cleanup() {
+ for n in ${r_a} ${r_b} ${r_w} ${c_a} ${c_b};do
+ ip netns del ${n}
+ done
+ rm -f ${rx}
+}
+
+trap cleanup EXIT
+
+test_path() {
+ msg="$1"
+
+ ip netns exec ${c_b} nc -n -w 3 -q 3 -u -l -p 5000 > ${rx} < /dev/null &
+
+ sleep 1
+ for i in 1 2 3; do
+ head -c1400 /dev/zero | tr "\000" "a" | ip netns exec ${c_a} nc -n -w 1 -u 192.168.20.2 5000
+ done
+
+ wait
+
+ bytes=$(wc -c < ${rx})
+
+ if [ $bytes -eq 1400 ];then
+ echo "OK: PMTU $msg connection tracking"
+ else
+ echo "FAIL: PMTU $msg connection tracking: got $bytes, expected 1400"
+ exit 1
+ fi
+}
+
+# Detailed setup for Router A
+# ---------------------------
+# Interfaces:
+# eth0: 10.2.2.1/24
+# eth1: 192.168.10.1/24
+# ipip0: No IP address, local 10.2.2.1 remote 10.4.4.1
+# Routes:
+# 192.168.20.0/24 dev ipip0 (192.168.20.0/24 is subnet of Client B)
+# 10.4.4.1 via 10.2.2.254 (Router B via Wanrouter)
+# No iptables rules at all.
+
+ip link add veth0 netns ${r_a} type veth peer name veth0 netns ${r_w}
+ip link add veth1 netns ${r_a} type veth peer name veth0 netns ${c_a}
+
+l_addr="10.2.2.1"
+r_addr="10.4.4.1"
+ip netns exec ${r_a} ip link add ipip0 type ipip local ${l_addr} remote ${r_addr} mode ipip || exit $ksft_skip
+
+for dev in lo veth0 veth1 ipip0; do
+ ip -net ${r_a} link set $dev up
+done
+
+ip -net ${r_a} addr add 10.2.2.1/24 dev veth0
+ip -net ${r_a} addr add 192.168.10.1/24 dev veth1
+
+ip -net ${r_a} route add 192.168.20.0/24 dev ipip0
+ip -net ${r_a} route add 10.4.4.0/24 via 10.2.2.254
+
+ip netns exec ${r_a} sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
+
+# Detailed setup for Router B
+# ---------------------------
+# Interfaces:
+# eth0: 10.4.4.1/24
+# eth1: 192.168.20.1/24
+# ipip0: No IP address, local 10.4.4.1 remote 10.2.2.1
+# Routes:
+# 192.168.10.0/24 dev ipip0 (192.168.10.0/24 is subnet of Client A)
+# 10.2.2.1 via 10.4.4.254 (Router A via Wanrouter)
+# No iptables rules at all.
+
+ip link add veth0 netns ${r_b} type veth peer name veth1 netns ${r_w}
+ip link add veth1 netns ${r_b} type veth peer name veth0 netns ${c_b}
+
+l_addr="10.4.4.1"
+r_addr="10.2.2.1"
+
+ip netns exec ${r_b} ip link add ipip0 type ipip local ${l_addr} remote ${r_addr} mode ipip || exit $ksft_skip
+
+for dev in lo veth0 veth1 ipip0; do
+ ip -net ${r_b} link set $dev up
+done
+
+ip -net ${r_b} addr add 10.4.4.1/24 dev veth0
+ip -net ${r_b} addr add 192.168.20.1/24 dev veth1
+
+ip -net ${r_b} route add 192.168.10.0/24 dev ipip0
+ip -net ${r_b} route add 10.2.2.0/24 via 10.4.4.254
+ip netns exec ${r_b} sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
+
+# Client A
+ip -net ${c_a} addr add 192.168.10.2/24 dev veth0
+ip -net ${c_a} link set dev lo up
+ip -net ${c_a} link set dev veth0 up
+ip -net ${c_a} route add default via 192.168.10.1
+
+# Client B
+ip -net ${c_b} addr add 192.168.20.2/24 dev veth0
+ip -net ${c_b} link set dev veth0 up
+ip -net ${c_b} link set dev lo up
+ip -net ${c_b} route add default via 192.168.20.1
+
+# Wan
+ip -net ${r_w} addr add 10.2.2.254/24 dev veth0
+ip -net ${r_w} addr add 10.4.4.254/24 dev veth1
+
+ip -net ${r_w} link set dev lo up
+ip -net ${r_w} link set dev veth0 up mtu 1400
+ip -net ${r_w} link set dev veth1 up mtu 1400
+
+ip -net ${r_a} link set dev veth0 mtu 1400
+ip -net ${r_b} link set dev veth0 mtu 1400
+
+ip netns exec ${r_w} sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
+
+# Path MTU discovery
+# ------------------
+# Running tracepath from Client A to Client B shows PMTU discovery is working
+# as expected:
+#
+# clienta:~# tracepath 192.168.20.2
+# 1?: [LOCALHOST] pmtu 1500
+# 1: 192.168.10.1 0.867ms
+# 1: 192.168.10.1 0.302ms
+# 2: 192.168.10.1 0.312ms pmtu 1480
+# 2: no reply
+# 3: 192.168.10.1 0.510ms pmtu 1380
+# 3: 192.168.20.2 2.320ms reached
+# Resume: pmtu 1380 hops 3 back 3
+
+# ip netns exec ${c_a} traceroute --mtu 192.168.20.2
+
+# Router A has learned PMTU (1400) to Router B from Wanrouter.
+# Client A has learned PMTU (1400 - IPIP overhead = 1380) to Client B
+# from Router A.
+
+# Send large UDP packet
+# ----------------------
+# Now we send a 1400-byte UDP packet from Client A to Client B:
+
+# clienta:~# head -c1400 /dev/zero | tr "\000" "a" | nc -u 192.168.20.2 5000
+test_path "without"
+
+# The IPv4 stack on Client A already knows the PMTU to Client B, so the
+# UDP packet is sent as two fragments (1380 + 20). Router A forwards the
+# fragments between eth1 and ipip0. The fragments fit into the tunnel and
+# reach their destination.
+
+# When sending the large UDP packet again, Router A now reassembles the
+# fragments before routing the packet over ipip0. The resulting IPIP
+# packet is too big (1400) for the tunnel PMTU (1380) to Router B, so it
+# is dropped on Router A before it is sent.
+
+ip netns exec ${r_a} iptables -A FORWARD -m conntrack --ctstate NEW
+test_path "with"
diff --git a/tools/testing/selftests/netfilter/nft_conntrack_helper.sh b/tools/testing/selftests/netfilter/nft_conntrack_helper.sh
index edf0a48da6bf..bf6b9626c7dd 100755
--- a/tools/testing/selftests/netfilter/nft_conntrack_helper.sh
+++ b/tools/testing/selftests/netfilter/nft_conntrack_helper.sh
@@ -94,7 +94,13 @@ check_for_helper()
local message=$2
local port=$3
- ip netns exec ${netns} conntrack -L -p tcp --dport $port 2> /dev/null |grep -q 'helper=ftp'
+ if echo $message |grep -q 'ipv6';then
+ local family="ipv6"
+ else
+ local family="ipv4"
+ fi
+
+ ip netns exec ${netns} conntrack -L -f $family -p tcp --dport $port 2> /dev/null |grep -q 'helper=ftp'
if [ $? -ne 0 ] ; then
echo "FAIL: ${netns} did not show attached helper $message" 1>&2
ret=1
@@ -111,8 +117,8 @@ test_helper()
sleep 3 | ip netns exec ${ns2} nc -w 2 -l -p $port > /dev/null &
- sleep 1
sleep 1 | ip netns exec ${ns1} nc -w 2 10.0.1.2 $port > /dev/null &
+ sleep 1
check_for_helper "$ns1" "ip $msg" $port
check_for_helper "$ns2" "ip $msg" $port
@@ -128,8 +134,8 @@ test_helper()
sleep 3 | ip netns exec ${ns2} nc -w 2 -6 -l -p $port > /dev/null &
- sleep 1
sleep 1 | ip netns exec ${ns1} nc -w 2 -6 dead:1::2 $port > /dev/null &
+ sleep 1
check_for_helper "$ns1" "ipv6 $msg" $port
check_for_helper "$ns2" "ipv6 $msg" $port
diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
index cb53a8b777e6..c25cf7cd45e9 100644
--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c
+++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
@@ -443,7 +443,6 @@ int test_alignment_handler_integer(void)
LOAD_DFORM_TEST(ldu);
LOAD_XFORM_TEST(ldx);
LOAD_XFORM_TEST(ldux);
- LOAD_DFORM_TEST(lmw);
STORE_DFORM_TEST(stb);
STORE_XFORM_TEST(stbx);
STORE_DFORM_TEST(stbu);
@@ -462,7 +461,11 @@ int test_alignment_handler_integer(void)
STORE_XFORM_TEST(stdx);
STORE_DFORM_TEST(stdu);
STORE_XFORM_TEST(stdux);
+
+#ifdef __BIG_ENDIAN__
+ LOAD_DFORM_TEST(lmw);
STORE_DFORM_TEST(stmw);
+#endif
return rc;
}
diff --git a/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c
index 9e5c7f3f498a..0af4f02669a1 100644
--- a/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c
+++ b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c
@@ -290,5 +290,5 @@ static int test(void)
int main(void)
{
- test_harness(test, "pkey_exec_prot");
+ return test_harness(test, "pkey_exec_prot");
}
diff --git a/tools/testing/selftests/powerpc/mm/pkey_siginfo.c b/tools/testing/selftests/powerpc/mm/pkey_siginfo.c
index 4f815d7c1214..2db76e56d4cb 100644
--- a/tools/testing/selftests/powerpc/mm/pkey_siginfo.c
+++ b/tools/testing/selftests/powerpc/mm/pkey_siginfo.c
@@ -329,5 +329,5 @@ static int test(void)
int main(void)
{
- test_harness(test, "pkey_siginfo");
+ return test_harness(test, "pkey_siginfo");
}
diff --git a/tools/testing/selftests/vDSO/.gitignore b/tools/testing/selftests/vDSO/.gitignore
index 5eb64d41e541..a8dc51af5a9c 100644
--- a/tools/testing/selftests/vDSO/.gitignore
+++ b/tools/testing/selftests/vDSO/.gitignore
@@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
vdso_test
+vdso_test_abi
+vdso_test_clock_getres
+vdso_test_correctness
vdso_test_gettimeofday
vdso_test_getcpu
vdso_standalone_test_x86
diff --git a/tools/testing/selftests/vDSO/vdso_test_correctness.c b/tools/testing/selftests/vDSO/vdso_test_correctness.c
index 5029ef9b228c..c4aea794725a 100644
--- a/tools/testing/selftests/vDSO/vdso_test_correctness.c
+++ b/tools/testing/selftests/vDSO/vdso_test_correctness.c
@@ -349,7 +349,7 @@ static void test_one_clock_gettime64(int clock, const char *name)
return;
}
- printf("\t%llu.%09ld %llu.%09ld %llu.%09ld\n",
+ printf("\t%llu.%09lld %llu.%09lld %llu.%09lld\n",
(unsigned long long)start.tv_sec, start.tv_nsec,
(unsigned long long)vdso.tv_sec, vdso.tv_nsec,
(unsigned long long)end.tv_sec, end.tv_nsec);
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 9a25307f6115..d42115e4284d 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -4,7 +4,7 @@
include local_config.mk
uname_M := $(shell uname -m 2>/dev/null || echo not)
-MACHINE ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/')
+MACHINE ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/' -e 's/ppc64.*/ppc64/')
# Without this, failed build products remain, with up-to-date timestamps,
# thus tricking Make (and you!) into believing that All Is Well, in subsequent
@@ -43,7 +43,7 @@ TEST_GEN_FILES += thuge-gen
TEST_GEN_FILES += transhuge-stress
TEST_GEN_FILES += userfaultfd
-ifeq ($(ARCH),x86_64)
+ifeq ($(MACHINE),x86_64)
CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_32bit_program.c -m32)
CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_64bit_program.c)
CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_program.c -no-pie)
@@ -65,13 +65,13 @@ TEST_GEN_FILES += $(BINARIES_64)
endif
else
-ifneq (,$(findstring $(ARCH),powerpc))
+ifneq (,$(findstring $(MACHINE),ppc64))
TEST_GEN_FILES += protection_keys
endif
endif
-ifneq (,$(filter $(MACHINE),arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64))
+ifneq (,$(filter $(MACHINE),arm64 ia64 mips64 parisc64 ppc64 riscv64 s390x sh64 sparc64 x86_64))
TEST_GEN_FILES += va_128TBswitch
TEST_GEN_FILES += virtual_address_range
TEST_GEN_FILES += write_to_hugetlbfs
@@ -84,7 +84,7 @@ TEST_FILES := test_vmalloc.sh
KSFT_KHDR_INSTALL := 1
include ../lib.mk
-ifeq ($(ARCH),x86_64)
+ifeq ($(MACHINE),x86_64)
BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
diff --git a/tools/testing/selftests/wireguard/qemu/debug.config b/tools/testing/selftests/wireguard/qemu/debug.config
index b50c2085c1ac..fe07d97df9fa 100644
--- a/tools/testing/selftests/wireguard/qemu/debug.config
+++ b/tools/testing/selftests/wireguard/qemu/debug.config
@@ -1,5 +1,4 @@
CONFIG_LOCALVERSION="-debug"
-CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_POINTER=y
CONFIG_STACK_VALIDATION=y
CONFIG_DEBUG_KERNEL=y
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5f260488e999..fa9e3614d30e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -485,9 +485,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
kvm->mmu_notifier_count++;
need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
range->flags);
- need_tlb_flush |= kvm->tlbs_dirty;
/* we've to flush the tlb before the pages can be freed */
- if (need_tlb_flush)
+ if (need_tlb_flush || kvm->tlbs_dirty)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);